feature 8029 change RO to python3. Using vim plugins 20/8020/10
authortierno <alfonso.tiernosepulveda@telefonica.com>
Fri, 4 Oct 2019 12:56:31 +0000 (12:56 +0000)
committertierno <alfonso.tiernosepulveda@telefonica.com>
Fri, 25 Oct 2019 08:52:48 +0000 (08:52 +0000)
Change-Id: I1e7bf61db9c39c66e0233c81bd8b4caa6650d389
Signed-off-by: tierno <alfonso.tiernosepulveda@telefonica.com>
322 files changed:
Dockerfile
Dockerfile-local [new file with mode: 0644]
MANIFEST.in [deleted file]
Makefile
README.rst [deleted file]
RO-VIM-aws/Makefile [new file with mode: 0644]
RO-VIM-aws/osm_rovim_aws/vimconn_aws.py [new file with mode: 0644]
RO-VIM-aws/requirements.txt [new file with mode: 0644]
RO-VIM-aws/setup.py [new file with mode: 0644]
RO-VIM-aws/stdeb.cfg [new file with mode: 0644]
RO-VIM-aws/tox.ini [new file with mode: 0644]
RO-VIM-azure/Makefile [new file with mode: 0644]
RO-VIM-azure/debian/python3-osm-rovim-azure.postinst [new file with mode: 0755]
RO-VIM-azure/osm_rovim_azure/vimconn_azure.py [new file with mode: 0755]
RO-VIM-azure/requirements.txt [new file with mode: 0644]
RO-VIM-azure/setup.py [new file with mode: 0644]
RO-VIM-azure/stdeb.cfg [new file with mode: 0644]
RO-VIM-azure/tox.ini [new file with mode: 0644]
RO-VIM-fos/Makefile [new file with mode: 0644]
RO-VIM-fos/debian/python3-osm-rovim-fos.postinst [new file with mode: 0755]
RO-VIM-fos/osm_rovim_fos/vimconn_fos.py [new file with mode: 0644]
RO-VIM-fos/requirements.txt [new file with mode: 0644]
RO-VIM-fos/setup.py [new file with mode: 0644]
RO-VIM-fos/stdeb.cfg [new file with mode: 0644]
RO-VIM-fos/tox.ini [new file with mode: 0644]
RO-VIM-opennebula/Makefile [new file with mode: 0644]
RO-VIM-opennebula/debian/python3-osm-rovim-opennebula.postinst [new file with mode: 0755]
RO-VIM-opennebula/osm_rovim_opennebula/vimconn_opennebula.py [new file with mode: 0644]
RO-VIM-opennebula/requirements.txt [new file with mode: 0644]
RO-VIM-opennebula/setup.py [new file with mode: 0644]
RO-VIM-opennebula/stdeb.cfg [new file with mode: 0644]
RO-VIM-opennebula/tox.ini [new file with mode: 0644]
RO-VIM-openstack/Makefile [new file with mode: 0644]
RO-VIM-openstack/debian/python3-osm-rovim-openstack.postinst [new file with mode: 0755]
RO-VIM-openstack/osm_rovim_openstack/tests/test_vimconn_openstack.py [new file with mode: 0644]
RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py [new file with mode: 0644]
RO-VIM-openstack/requirements.txt [new file with mode: 0644]
RO-VIM-openstack/setup.py [new file with mode: 0644]
RO-VIM-openstack/stdeb.cfg [new file with mode: 0644]
RO-VIM-openstack/tox.ini [new file with mode: 0644]
RO-VIM-openvim/Makefile [new file with mode: 0644]
RO-VIM-openvim/osm_rovim_openvim/vimconn_openvim.py [new file with mode: 0644]
RO-VIM-openvim/requirements.txt [new file with mode: 0644]
RO-VIM-openvim/setup.py [new file with mode: 0644]
RO-VIM-openvim/stdeb.cfg [new file with mode: 0644]
RO-VIM-openvim/tox.ini [new file with mode: 0644]
RO-VIM-vmware/Makefile [new file with mode: 0644]
RO-VIM-vmware/debian/python3-osm-rovim-vmware.postinst [new file with mode: 0755]
RO-VIM-vmware/osm_rovim_vmware/tests/test_vimconn_vmware.py [new file with mode: 0755]
RO-VIM-vmware/osm_rovim_vmware/tests/test_vimconn_vmware_xml_response.py [new file with mode: 0644]
RO-VIM-vmware/osm_rovim_vmware/vimconn_vmware.py [new file with mode: 0644]
RO-VIM-vmware/requirements.txt [new file with mode: 0644]
RO-VIM-vmware/setup.py [new file with mode: 0644]
RO-VIM-vmware/stdeb.cfg [new file with mode: 0644]
RO-VIM-vmware/tox.ini [new file with mode: 0644]
RO-client/Makefile [new file with mode: 0644]
RO-client/README.rst [new file with mode: 0644]
RO-client/debian/python3-osm-roclient.postinst [new file with mode: 0755]
RO-client/osm_roclient/roclient.py [new file with mode: 0755]
RO-client/requirements.txt [new file with mode: 0644]
RO-client/setup.py [new file with mode: 0644]
RO-client/stdeb.cfg [new file with mode: 0644]
RO-client/tox.ini [new file with mode: 0644]
RO/MANIFEST.in [new file with mode: 0644]
RO/Makefile [new file with mode: 0644]
RO/README.rst [new file with mode: 0644]
RO/debian/python3-osm-ro.postinst [new file with mode: 0755]
RO/osm_ro/__init__.py [new file with mode: 0644]
RO/osm_ro/console_proxy_thread.py [new file with mode: 0644]
RO/osm_ro/database_utils/dump_db.sh [new file with mode: 0755]
RO/osm_ro/database_utils/init_mano_db.sh [new file with mode: 0755]
RO/osm_ro/database_utils/install-db-server.sh [new file with mode: 0755]
RO/osm_ro/database_utils/mano_db_structure.sql [new file with mode: 0644]
RO/osm_ro/database_utils/migrate_mano_db.sh [new file with mode: 0755]
RO/osm_ro/database_utils/migrations/down/34_remove_wim_tables.sql [new file with mode: 0644]
RO/osm_ro/database_utils/migrations/down/35_remove_sfc_ingress_and_egress.sql [new file with mode: 0644]
RO/osm_ro/database_utils/migrations/up/34_add_wim_tables.sql [new file with mode: 0644]
RO/osm_ro/database_utils/migrations/up/35_add_sfc_ingress_and_egress.sql [new file with mode: 0644]
RO/osm_ro/db_base.py [new file with mode: 0644]
RO/osm_ro/http_tools/__init__.py [new file with mode: 0644]
RO/osm_ro/http_tools/errors.py [new file with mode: 0644]
RO/osm_ro/http_tools/handler.py [new file with mode: 0644]
RO/osm_ro/http_tools/request_processing.py [new file with mode: 0644]
RO/osm_ro/http_tools/tests/__init__.py [new file with mode: 0644]
RO/osm_ro/http_tools/tests/test_errors.py [new file with mode: 0644]
RO/osm_ro/http_tools/tests/test_handler.py [new file with mode: 0644]
RO/osm_ro/http_tools/tox.ini [new file with mode: 0644]
RO/osm_ro/httpserver.py [new file with mode: 0644]
RO/osm_ro/nfvo.py [new file with mode: 0644]
RO/osm_ro/nfvo_db.py [new file with mode: 0644]
RO/osm_ro/openmano_schemas.py [new file with mode: 0644]
RO/osm_ro/openmanoclient.py [new file with mode: 0644]
RO/osm_ro/openmanod.cfg [new file with mode: 0644]
RO/osm_ro/openmanod.py [new file with mode: 0755]
RO/osm_ro/osm-ro.service [new file with mode: 0644]
RO/osm_ro/scripts/RO-of [new file with mode: 0755]
RO/osm_ro/scripts/RO-start.sh [new file with mode: 0755]
RO/osm_ro/scripts/get-options.sh [new file with mode: 0644]
RO/osm_ro/scripts/install-lib-osm-openvim.sh [new file with mode: 0755]
RO/osm_ro/scripts/install-openmano-service.sh [new file with mode: 0755]
RO/osm_ro/scripts/install-openmano.sh [new file with mode: 0755]
RO/osm_ro/scripts/install-osm-im.sh [new file with mode: 0755]
RO/osm_ro/scripts/openmano-report [new file with mode: 0755]
RO/osm_ro/scripts/service-openmano [new file with mode: 0755]
RO/osm_ro/tests/__init__.py [new file with mode: 0644]
RO/osm_ro/tests/db_helpers.py [new file with mode: 0644]
RO/osm_ro/tests/helpers.py [new file with mode: 0644]
RO/osm_ro/tests/test_db.py [new file with mode: 0644]
RO/osm_ro/tests/test_utils.py [new file with mode: 0644]
RO/osm_ro/utils.py [new file with mode: 0644]
RO/osm_ro/vim_thread.py [new file with mode: 0644]
RO/osm_ro/vimconn.py [new file with mode: 0644]
RO/osm_ro/vmwarecli.py [new file with mode: 0755]
RO/osm_ro/wim/__init__.py [new file with mode: 0644]
RO/osm_ro/wim/actions.py [new file with mode: 0644]
RO/osm_ro/wim/engine.py [new file with mode: 0644]
RO/osm_ro/wim/errors.py [new file with mode: 0644]
RO/osm_ro/wim/failing_connector.py [new file with mode: 0644]
RO/osm_ro/wim/http_handler.py [new file with mode: 0644]
RO/osm_ro/wim/persistence.py [new file with mode: 0644]
RO/osm_ro/wim/schemas.py [new file with mode: 0644]
RO/osm_ro/wim/tests/__init__.py [new file with mode: 0644]
RO/osm_ro/wim/tests/fixtures.py [new file with mode: 0644]
RO/osm_ro/wim/tests/test_actions.py [new file with mode: 0644]
RO/osm_ro/wim/tests/test_engine.py [new file with mode: 0644]
RO/osm_ro/wim/tests/test_http_handler.py [new file with mode: 0644]
RO/osm_ro/wim/tests/test_persistence.py [new file with mode: 0644]
RO/osm_ro/wim/tests/test_wim_thread.py [new file with mode: 0644]
RO/osm_ro/wim/tox.ini [new file with mode: 0644]
RO/osm_ro/wim/wan_link_actions.py [new file with mode: 0644]
RO/osm_ro/wim/wim_thread.py [new file with mode: 0644]
RO/osm_ro/wim/wimconn.py [new file with mode: 0644]
RO/osm_ro/wim/wimconn_dynpac.py [new file with mode: 0644]
RO/osm_ro/wim/wimconn_fake.py [new file with mode: 0644]
RO/osm_ro/wim/wimconn_ietfl2vpn.py [new file with mode: 0644]
RO/osm_ro/wim/wimconn_odl.py [new file with mode: 0644]
RO/requirements.txt [new file with mode: 0644]
RO/setup.py [new file with mode: 0755]
RO/stdeb.cfg [new file with mode: 0644]
RO/test/RO_tests/afiinity_vnf/scenario_simple_2_vnf_afinnity.yaml [new file with mode: 0644]
RO/test/RO_tests/afiinity_vnf/vnfd_linux_2_vnfc_affinity.yaml [new file with mode: 0644]
RO/test/RO_tests/empy_volume/scenario_additional_disk_empty_volume.yaml [new file with mode: 0644]
RO/test/RO_tests/empy_volume/vnfd_additional_disk_empty_volume.yaml [new file with mode: 0644]
RO/test/RO_tests/floating_ip/scenario_floating_ip.yaml [new file with mode: 0644]
RO/test/RO_tests/floating_ip/vnfd_floating_ip.yaml [new file with mode: 0644]
RO/test/RO_tests/image_based_volume/scenario_additional_disk_based_image.yaml [new file with mode: 0644]
RO/test/RO_tests/image_based_volume/vnfd_additional_disk_based_image.yaml [new file with mode: 0644]
RO/test/RO_tests/no_port_security/scenario_vnf_no_port_security.yaml [new file with mode: 0644]
RO/test/RO_tests/no_port_security/vnfd_no_port_security.yaml [new file with mode: 0644]
RO/test/RO_tests/passthrough/scenario_p2p_passthrough.yaml [new file with mode: 0644]
RO/test/RO_tests/passthrough/vnfd_1passthrough.yaml [new file with mode: 0644]
RO/test/RO_tests/pmp_passthrough/scenario_pmp_passthrough.yaml [new file with mode: 0644]
RO/test/RO_tests/pmp_passthrough/vnfd_1passthrough.yaml [new file with mode: 0644]
RO/test/RO_tests/pmp_sriov/scenario_pmp_sriov.yaml [new file with mode: 0644]
RO/test/RO_tests/pmp_sriov/vnfd_1sriov.yaml [new file with mode: 0644]
RO/test/RO_tests/pmp_sriov_passthrough/scenario_pmp_sriov_passthrough.yaml [new file with mode: 0644]
RO/test/RO_tests/pmp_sriov_passthrough/vnfd_1passthrough.yaml [new file with mode: 0644]
RO/test/RO_tests/pmp_sriov_passthrough/vnfd_1sriov.yaml [new file with mode: 0644]
RO/test/RO_tests/simple_2_vnf/scenario_simple_2_vnf.yaml [new file with mode: 0644]
RO/test/RO_tests/simple_2_vnf/vnfd_linux.yaml [new file with mode: 0644]
RO/test/RO_tests/simple_cloud_init/scenario_simple-cloud-init.yaml [new file with mode: 0644]
RO/test/RO_tests/simple_cloud_init/vnfd_linux-cloud-init.yaml [new file with mode: 0644]
RO/test/RO_tests/simple_count3/scenario_linux_count3.yaml [new file with mode: 0644]
RO/test/RO_tests/simple_count3/vnfd_count3.yaml [new file with mode: 0644]
RO/test/RO_tests/simple_linux/scenario_simple_linux.yaml [new file with mode: 0644]
RO/test/RO_tests/simple_linux/vnfd_linux.yaml [new file with mode: 0644]
RO/test/RO_tests/simple_multi_vnfc/scenario_multi_vnfc.yaml [new file with mode: 0644]
RO/test/RO_tests/simple_multi_vnfc/vnfd_linux_2VMs_v02.yaml [new file with mode: 0644]
RO/test/RO_tests/sr_iov/scenario_p2p_sriov.yaml [new file with mode: 0644]
RO/test/RO_tests/sr_iov/vnfd_1sriov.yaml [new file with mode: 0644]
RO/test/RO_tests/sriov_passthrough/scenario_p2p_sriov_passthrough.yaml [new file with mode: 0644]
RO/test/RO_tests/sriov_passthrough/vnfd_1passthrough.yaml [new file with mode: 0644]
RO/test/RO_tests/sriov_passthrough/vnfd_1sriov.yaml [new file with mode: 0644]
RO/test/RO_tests/v3_2vdu_set_ip_mac/scenario_2vdu_set_ip_mac.yaml [new file with mode: 0644]
RO/test/RO_tests/v3_2vdu_set_ip_mac/vnfd_2vdu_set_ip_mac.yaml [new file with mode: 0644]
RO/test/RO_tests/v3_2vdu_set_ip_mac/vnfd_2vdu_set_ip_mac2.yaml [new file with mode: 0644]
RO/test/basictest.sh [new file with mode: 0755]
RO/test/test-multivim.sh [new file with mode: 0755]
RO/test/test_RO.py [new file with mode: 0755]
RO/test/test_on_container.sh [new file with mode: 0755]
RO/test/test_openmanocli.sh [new file with mode: 0755]
RO/test/test_openmanoclient.py [new file with mode: 0755]
RO/test/test_osconnector.py [new file with mode: 0755]
RO/test/test_vimconn.sh [new file with mode: 0755]
RO/tox.ini [new file with mode: 0644]
database_utils/dump_db.sh [deleted file]
database_utils/init_mano_db.sh [deleted file]
database_utils/install-db-server.sh [deleted file]
database_utils/mano_db_structure.sql [deleted file]
database_utils/migrate_mano_db.sh [deleted file]
database_utils/migrations/down/34_remove_wim_tables.sql [deleted file]
database_utils/migrations/down/35_remove_sfc_ingress_and_egress.sql [deleted file]
database_utils/migrations/up/34_add_wim_tables.sql [deleted file]
database_utils/migrations/up/35_add_sfc_ingress_and_egress.sql [deleted file]
devops-stages/stage-archive.sh
devops-stages/stage-build.sh
devops-stages/stage-test.sh
docker/Dockerfile-local [deleted file]
openmano [deleted file]
openmanod [deleted file]
osm_ro/__init__.py [deleted file]
osm_ro/console_proxy_thread.py [deleted file]
osm_ro/db_base.py [deleted file]
osm_ro/http_tools/__init__.py [deleted file]
osm_ro/http_tools/errors.py [deleted file]
osm_ro/http_tools/handler.py [deleted file]
osm_ro/http_tools/request_processing.py [deleted file]
osm_ro/http_tools/tests/__init__.py [deleted file]
osm_ro/http_tools/tests/test_errors.py [deleted file]
osm_ro/http_tools/tests/test_handler.py [deleted file]
osm_ro/http_tools/tox.ini [deleted file]
osm_ro/httpserver.py [deleted file]
osm_ro/nfvo.py [deleted file]
osm_ro/nfvo_db.py [deleted file]
osm_ro/openmano_schemas.py [deleted file]
osm_ro/openmanoclient.py [deleted file]
osm_ro/openmanod.cfg [deleted file]
osm_ro/osm-ro.service [deleted file]
osm_ro/tests/__init__.py [deleted file]
osm_ro/tests/db_helpers.py [deleted file]
osm_ro/tests/helpers.py [deleted file]
osm_ro/tests/test_db.py [deleted file]
osm_ro/tests/test_utils.py [deleted file]
osm_ro/tests/test_vimconn_openstack.py [deleted file]
osm_ro/tests/test_vimconn_vmware.py [deleted file]
osm_ro/tests/test_vimconn_vmware_xml_response.py [deleted file]
osm_ro/utils.py [deleted file]
osm_ro/vim_thread.py [deleted file]
osm_ro/vimconn.py [deleted file]
osm_ro/vimconn_aws.py [deleted file]
osm_ro/vimconn_azure.py [deleted file]
osm_ro/vimconn_fos.py [deleted file]
osm_ro/vimconn_opennebula.py [deleted file]
osm_ro/vimconn_openstack.py [deleted file]
osm_ro/vimconn_openvim.py [deleted file]
osm_ro/vimconn_vmware.py [deleted file]
osm_ro/vmwarecli.py [deleted file]
osm_ro/wim/__init__.py [deleted file]
osm_ro/wim/actions.py [deleted file]
osm_ro/wim/engine.py [deleted file]
osm_ro/wim/errors.py [deleted file]
osm_ro/wim/failing_connector.py [deleted file]
osm_ro/wim/http_handler.py [deleted file]
osm_ro/wim/persistence.py [deleted file]
osm_ro/wim/schemas.py [deleted file]
osm_ro/wim/tests/__init__.py [deleted file]
osm_ro/wim/tests/fixtures.py [deleted file]
osm_ro/wim/tests/test_actions.py [deleted file]
osm_ro/wim/tests/test_engine.py [deleted file]
osm_ro/wim/tests/test_http_handler.py [deleted file]
osm_ro/wim/tests/test_persistence.py [deleted file]
osm_ro/wim/tests/test_wim_thread.py [deleted file]
osm_ro/wim/tox.ini [deleted file]
osm_ro/wim/wan_link_actions.py [deleted file]
osm_ro/wim/wim_thread.py [deleted file]
osm_ro/wim/wimconn.py [deleted file]
osm_ro/wim/wimconn_dynpac.py [deleted file]
osm_ro/wim/wimconn_fake.py [deleted file]
osm_ro/wim/wimconn_ietfl2vpn.py [deleted file]
osm_ro/wim/wimconn_odl.py [deleted file]
requirements.txt [deleted file]
scripts/RO-of [deleted file]
scripts/RO-start.sh [deleted file]
scripts/get-options.sh [deleted file]
scripts/install-lib-osm-openvim.sh [deleted file]
scripts/install-openmano-service.sh [deleted file]
scripts/install-openmano.sh [deleted file]
scripts/install-osm-im.sh [deleted file]
scripts/openmano-report [deleted file]
scripts/python-osm-ro.postinst [deleted file]
scripts/service-openmano [deleted file]
setup.py [deleted file]
stdeb.cfg [deleted file]
test-docker/Dockerfile-devops [new file with mode: 0644]
test-docker/test-gen-devops.sh [new file with mode: 0755]
test-docker/test-gen-local.sh [new file with mode: 0755]
test/RO_tests/afiinity_vnf/scenario_simple_2_vnf_afinnity.yaml [deleted file]
test/RO_tests/afiinity_vnf/vnfd_linux_2_vnfc_affinity.yaml [deleted file]
test/RO_tests/empy_volume/scenario_additional_disk_empty_volume.yaml [deleted file]
test/RO_tests/empy_volume/vnfd_additional_disk_empty_volume.yaml [deleted file]
test/RO_tests/floating_ip/scenario_floating_ip.yaml [deleted file]
test/RO_tests/floating_ip/vnfd_floating_ip.yaml [deleted file]
test/RO_tests/image_based_volume/scenario_additional_disk_based_image.yaml [deleted file]
test/RO_tests/image_based_volume/vnfd_additional_disk_based_image.yaml [deleted file]
test/RO_tests/no_port_security/scenario_vnf_no_port_security.yaml [deleted file]
test/RO_tests/no_port_security/vnfd_no_port_security.yaml [deleted file]
test/RO_tests/passthrough/scenario_p2p_passthrough.yaml [deleted file]
test/RO_tests/passthrough/vnfd_1passthrough.yaml [deleted file]
test/RO_tests/pmp_passthrough/scenario_pmp_passthrough.yaml [deleted file]
test/RO_tests/pmp_passthrough/vnfd_1passthrough.yaml [deleted file]
test/RO_tests/pmp_sriov/scenario_pmp_sriov.yaml [deleted file]
test/RO_tests/pmp_sriov/vnfd_1sriov.yaml [deleted file]
test/RO_tests/pmp_sriov_passthrough/scenario_pmp_sriov_passthrough.yaml [deleted file]
test/RO_tests/pmp_sriov_passthrough/vnfd_1passthrough.yaml [deleted file]
test/RO_tests/pmp_sriov_passthrough/vnfd_1sriov.yaml [deleted file]
test/RO_tests/simple_2_vnf/scenario_simple_2_vnf.yaml [deleted file]
test/RO_tests/simple_2_vnf/vnfd_linux.yaml [deleted file]
test/RO_tests/simple_cloud_init/scenario_simple-cloud-init.yaml [deleted file]
test/RO_tests/simple_cloud_init/vnfd_linux-cloud-init.yaml [deleted file]
test/RO_tests/simple_count3/scenario_linux_count3.yaml [deleted file]
test/RO_tests/simple_count3/vnfd_count3.yaml [deleted file]
test/RO_tests/simple_linux/scenario_simple_linux.yaml [deleted file]
test/RO_tests/simple_linux/vnfd_linux.yaml [deleted file]
test/RO_tests/simple_multi_vnfc/scenario_multi_vnfc.yaml [deleted file]
test/RO_tests/simple_multi_vnfc/vnfd_linux_2VMs_v02.yaml [deleted file]
test/RO_tests/sr_iov/scenario_p2p_sriov.yaml [deleted file]
test/RO_tests/sr_iov/vnfd_1sriov.yaml [deleted file]
test/RO_tests/sriov_passthrough/scenario_p2p_sriov_passthrough.yaml [deleted file]
test/RO_tests/sriov_passthrough/vnfd_1passthrough.yaml [deleted file]
test/RO_tests/sriov_passthrough/vnfd_1sriov.yaml [deleted file]
test/RO_tests/v3_2vdu_set_ip_mac/scenario_2vdu_set_ip_mac.yaml [deleted file]
test/RO_tests/v3_2vdu_set_ip_mac/vnfd_2vdu_set_ip_mac.yaml [deleted file]
test/RO_tests/v3_2vdu_set_ip_mac/vnfd_2vdu_set_ip_mac2.yaml [deleted file]
test/basictest.sh [deleted file]
test/test-multivim.sh [deleted file]
test/test_RO.py [deleted file]
test/test_on_container.sh [deleted file]
test/test_openmanocli.sh [deleted file]
test/test_openmanoclient.py [deleted file]
test/test_osconnector.py [deleted file]
test/test_vimconn.sh [deleted file]
tox.ini [deleted file]

index a0f45ba..c758db9 100644 (file)
 # Use docker/Dockerfile-local for running osm/RO in a docker container from source
 
 FROM ubuntu:16.04
-
 RUN  apt-get update && \
-  DEBIAN_FRONTEND=noninteractive apt-get -y install git make python python-pip debhelper python3 python3-all python3-pip python3-setuptools && \
-  DEBIAN_FRONTEND=noninteractive apt-get -y install wget tox apt-utils flake8 python-nose python-mock && \
-  DEBIAN_FRONTEND=noninteractive pip install pip==9.0.3 && \
-  DEBIAN_FRONTEND=noninteractive pip3 install pip==9.0.3 && \
-  DEBIAN_FRONTEND=noninteractive pip install -U setuptools setuptools-version-command stdeb && \
-  DEBIAN_FRONTEND=noninteractive pip install -U pyang pyangbind && \
-  DEBIAN_FRONTEND=noninteractive pip3 install -U pyang pyangbind && \
-  DEBIAN_FRONTEND=noninteractive apt-get -y install python-yaml python-netaddr python-boto && \
-  DEBIAN_FRONTEND=noninteractive apt-get -y install software-properties-common && \
-  DEBIAN_FRONTEND=noninteractive add-apt-repository -y cloud-archive:queens && \
-  DEBIAN_FRONTEND=noninteractive apt-get update && \
-  DEBIAN_FRONTEND=noninteractive apt-get -y install python-novaclient python-keystoneclient python-glanceclient python-cinderclient python-neutronclient python-networking-l2gw && \
-  DEBIAN_FRONTEND=noninteractive pip install -U progressbar pyvmomi pyvcloud==19.1.1 && \
-  DEBIAN_FRONTEND=noninteractive pip install -U fog05rest && \
-  DEBIAN_FRONTEND=noninteractive pip install -U azure && \
-  DEBIAN_FRONTEND=noninteractive apt-get -y install python-argcomplete python-bottle python-cffi python-packaging python-paramiko python-pkgconfig libmysqlclient-dev libssl-dev libffi-dev python-mysqldb && \
-  DEBIAN_FRONTEND=noninteractive apt-get -y install python-logutils python-openstackclient python-openstacksdk && \
-  DEBIAN_FRONTEND=noninteractive pip install untangle && \
-  DEBIAN_FRONTEND=noninteractive pip install pyone && \
-  DEBIAN_FRONTEND=noninteractive pip install -e git+https://github.com/python-oca/python-oca#egg=oca
+  DEBIAN_FRONTEND=noninteractive apt-get --yes install git tox make python-all python3 python3-pip debhelper wget && \
+  DEBIAN_FRONTEND=noninteractive apt-get --yes install python3-all libssl-dev && \
+  DEBIAN_FRONTEND=noninteractive pip3 install -U setuptools setuptools-version-command stdeb
+
+# FROM ubuntu:16.04
 
+# RUN  apt-get update && \
+#   DEBIAN_FRONTEND=noninteractive apt-get -y install git make python python-pip debhelper python3 python3-all python3-pip python3-setuptools && \
+#   DEBIAN_FRONTEND=noninteractive apt-get -y install wget tox apt-utils flake8 python-nose python-mock && \
+#   DEBIAN_FRONTEND=noninteractive pip install pip==9.0.3 && \
+#   DEBIAN_FRONTEND=noninteractive pip3 install pip==9.0.3 && \
+#   DEBIAN_FRONTEND=noninteractive pip install -U setuptools setuptools-version-command stdeb && \
+#   DEBIAN_FRONTEND=noninteractive pip install -U pyang pyangbind && \
+#   DEBIAN_FRONTEND=noninteractive pip3 install -U pyang pyangbind && \
+#   DEBIAN_FRONTEND=noninteractive apt-get -y install python-yaml python-netaddr python-boto && \
+#   DEBIAN_FRONTEND=noninteractive apt-get -y install software-properties-common && \
+#   DEBIAN_FRONTEND=noninteractive add-apt-repository -y cloud-archive:queens && \
+#   DEBIAN_FRONTEND=noninteractive apt-get update && \
+#   DEBIAN_FRONTEND=noninteractive apt-get -y install python-novaclient python-keystoneclient python-glanceclient python-cinderclient python-neutronclient python-networking-l2gw && \
+#   DEBIAN_FRONTEND=noninteractive pip install -U progressbar pyvmomi pyvcloud==19.1.1 && \
+#   DEBIAN_FRONTEND=noninteractive pip install -U fog05rest && \
+#   DEBIAN_FRONTEND=noninteractive pip install -U azure && \
+#   DEBIAN_FRONTEND=noninteractive apt-get -y install python-argcomplete python-bottle python-cffi python-packaging python-paramiko python-pkgconfig libmysqlclient-dev libssl-dev libffi-dev python-mysqldb && \
+#   DEBIAN_FRONTEND=noninteractive apt-get -y install python-logutils python-openstackclient python-openstacksdk && \
+#   DEBIAN_FRONTEND=noninteractive pip install untangle && \
+#   DEBIAN_FRONTEND=noninteractive pip install pyone && \
+#   DEBIAN_FRONTEND=noninteractive pip install -e git+https://github.com/python-oca/python-oca#egg=oca
 
+# TODO py3 comment
 # Uncomment this block to generate automatically a debian package and show info
 # # Set the working directory to /app
 # WORKDIR /app
diff --git a/Dockerfile-local b/Dockerfile-local
new file mode 100644 (file)
index 0000000..48447ac
--- /dev/null
@@ -0,0 +1,99 @@
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+FROM ubuntu:18.04
+
+LABEL authors="Alfonso Tierno"
+
+RUN apt-get update && apt-get install -y git python3 python3-pip \
+    && python3 -m pip install --upgrade pip \
+    && DEBIAN_FRONTEND=noninteractive apt-get -y install libmysqlclient-dev mysql-client \
+    && DEBIAN_FRONTEND=noninteractive python3 -m pip install -U networking-l2gw  \
+    && DEBIAN_FRONTEND=noninteractive python3 -m pip install -U progressbar pyvmomi pyvcloud==19.1.1  \
+    && DEBIAN_FRONTEND=noninteractive apt-get -y install genisoimage
+
+# This is not needed, because package dependency will install anyway.
+# But done here in order to hurry up image generation using cache
+
+RUN DEBIAN_FRONTEND=noninteractive  apt-get -y install python3-neutronclient python3-openstackclient \
+    python3-requests python3-netaddr python3-argcomplete
+
+#    DEBIAN_FRONTEND=noninteractive apt-get -y install python-openstacksdk python-openstackclient && \
+# TODO py3   DEBIAN_FRONTEND=noninteractive add-apt-repository -y cloud-archive:rocky && apt-get update && apt-get install -y python3-networking-l2gw \
+
+#    DEBIAN_FRONTEND=noninteractive apt-get -y install python-cffi  libssl-dev libffi-dev python-mysqldb && \
+#    DEBIAN_FRONTEND=noninteractive pip2 install -U azure && \
+#    DEBIAN_FRONTEND=noninteractive pip2 install -U fog05rest && \
+#    && DEBIAN_FRONTEND=noninteractive apt-get -y install software-properties-common && \
+#    DEBIAN_FRONTEND=noninteractive apt-get -y install wget tox && \
+#    DEBIAN_FRONTEND=noninteractive pip2 install untangle && \
+#    DEBIAN_FRONTEND=noninteractive pip2 install pyone && \
+#    DEBIAN_FRONTEND=noninteractive pip2 install -e git+https://github.com/python-oca/python-oca#egg=oca && \
+
+COPY . /root/RO
+
+RUN /root/RO/RO/osm_ro/scripts/install-osm-im.sh --develop && \
+    /root/RO/RO/osm_ro/scripts/install-lib-osm-openvim.sh --develop && \
+    mkdir -p /var/log/osm && \
+    python3 -m pip install -e /root/RO/RO && \
+    python3 -m pip install -e /root/RO/RO-client && \
+    python3 -m pip install -e /root/RO/RO-VIM-vmware && \
+    python3 -m pip install -e /root/RO/RO-VIM-openstack && \
+    python3 -m pip install -e /root/RO/RO-VIM-openvim && \
+    python3 -m pip install -e /root/RO/RO-VIM-aws && \
+    python3 -m pip install -e /root/RO/RO-VIM-fos && \
+    rm -rf /root/.cache && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/*
+
+VOLUME /var/log/osm
+
+EXPOSE 9090
+
+# Two mysql databases are needed (DB and DB_OVIM). They can be hosted on the same or separate containers
+# These ENV must be provided
+# RO_DB_HOST: host of the main
+# RO_DB_OVIM_HOST: ...        if empty RO_DB_HOST is assumed
+# RO_DB_ROOT_PASSWORD: must be provided the first time to create the database. The database is created and initialized only if it is empty!
+# RO_DB_OVIM_ROOT_PASSWORD: ...  if empty RO_DB_ROOT_PASSWORD is assumed
+# RO_DB_USER:    default value 'mano'
+# RO_DB_OVIM_USER:       default value 'mano'
+# RO_DB_PASSWORD:        default value 'manopw'
+# RO_DB_OVIM_PASSWORD:        default value 'manopw'
+# RO_DB_PORT:             default value '3306'
+# RO_DB_OVIM_PORT:        default value '3306'
+# RO_DB_NAME:             default value 'mano_db'
+# RO_DB_OVIM_NAME:        default value 'mano_vim_db'
+# RO_LOG_FILE:            default log to stderr if not defined
+
+ENV RO_DB_HOST="" \
+    RO_DB_OVIM_HOST="" \
+    RO_DB_ROOT_PASSWORD="" \
+    RO_DB_OVIM_ROOT_PASSWORD="" \
+    RO_DB_USER=mano \
+    RO_DB_OVIM_USER=mano \
+    RO_DB_PASSWORD=manopw \
+    RO_DB_OVIM_PASSWORD=manopw \
+    RO_DB_PORT=3306 \
+    RO_DB_OVIM_PORT=3306 \
+    RO_DB_NAME=mano_db \
+    RO_DB_OVIM_NAME=mano_vim_db \
+    OPENMANO_TENANT=osm \
+    RO_LOG_LEVEL=DEBUG
+
+CMD RO-start.sh
+
+# HEALTHCHECK --start-period=30s --interval=10s --timeout=5s --retries=12 \
+#  CMD curl --silent --fail localhost:9090/openmano/tenants || exit 1
diff --git a/MANIFEST.in b/MANIFEST.in
deleted file mode 100644 (file)
index 483b709..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-#include MANIFEST.in
-#include requirements.txt
-include README.rst
-include openmano
-include openmanod
-recursive-include osm_ro *
-
index 33deb4e..90ee12c 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,5 +1,3 @@
-# Copyright 2018 Telefonica S.A.
-#
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-.PHONY: all test clean
-
-SHELL := /bin/bash
-
-BRANCH ?= master
-
-all:  #  lib-openvim    # osm-im
-       $(MAKE) clean_build build
-       $(MAKE) clean_build package
-
-clean: clean_build
-       rm -rf .build openvim IM
-
-clean_build:
-       rm -rf build
-       find osm_ro -name '*.pyc' -delete
-       find osm_ro -name '*.pyo' -delete
-
-prepare:
-#      ip install --user --upgrade setuptools
-       mkdir -p build/
-#      VER1=$(shell git describe | sed -e 's/^v//' |cut -d- -f1); \
-#      VER2=$(shell git describe | cut -d- -f2); \
-#      VER3=$(shell git describe | cut -d- -f3); \
-#      echo "$$VER1.dev$$VER2+$$VER3" > build/RO_VERSION
-       cp tox.ini build/
-       cp MANIFEST.in build/
-       cp requirements.txt build/
-       cp README.rst build/
-       cp setup.py build/
-       cp stdeb.cfg build/
-       cp -r osm_ro build/
-       cp openmano build/
-       cp openmanod build/
-       cp -r vnfs build/osm_ro
-       cp -r scenarios build/osm_ro
-       cp -r instance-scenarios build/osm_ro
-       cp -r scripts build/osm_ro
-       cp -r database_utils build/osm_ro
-       cp LICENSE build/osm_ro
-
-connectors: prepare
-       # python-novaclient is required for that
-       rm -f build/osm_ro/openmanolinkervimconn.py
-       cd build/osm_ro; for i in `ls vimconn_*.py |sed "s/\.py//"` ; do echo "import $$i" >> openmanolinkervimconn.py; done
-       python build/osm_ro/openmanolinkervimconn.py 2>&1
-       rm -f build/osm_ro/openmanolinkervimconn.py
-
-build: connectors prepare
-       python -m py_compile build/osm_ro/*.py
-#      cd build && tox -e flake8
-
-lib-openvim:
-       $(shell git clone https://osm.etsi.org/gerrit/osm/openvim)
-       LIB_BRANCH=$(shell git -C openvim branch -a|grep -oP 'remotes/origin/\K$(BRANCH)'); \
-       [ -z "$$LIB_BRANCH" ] && LIB_BRANCH='master'; \
-       echo "BRANCH: $(BRANCH)"; \
-       echo "LIB_OPENVIM_BRANCH: $$LIB_BRANCH"; \
-       git -C openvim checkout $$LIB_BRANCH
-       make -C openvim clean lite
-
-osm-im:
-       $(shell git clone https://osm.etsi.org/gerrit/osm/IM)
-       make -C IM clean all
-
-package: prepare
-#      apt-get install -y python-stdeb
-       cd build && python setup.py --command-packages=stdeb.command sdist_dsc --with-python2=True
-       cd build && cp osm_ro/scripts/python-osm-ro.postinst deb_dist/osm-ro*/debian/
-       cd build/deb_dist/osm-ro* && dpkg-buildpackage -rfakeroot -uc -us
-       mkdir -p .build
-       cp build/deb_dist/python-*.deb .build/
-
-snap:
-       echo "Nothing to be done yet"
-
-install: lib-openvim osm-im
-       dpkg -i IM/deb_dist/python-osm-im*.deb
-       dpkg -i openvim/.build/python-lib-osm-openvim*.deb
-       dpkg -i .build/python-osm-ro*.deb
-       cd .. && \
-       OSMLIBOVIM_PATH=`python -c 'import lib_osm_openvim; print lib_osm_openvim.__path__[0]'` || FATAL "lib-osm-openvim was not properly installed" && \
-       OSMRO_PATH=`python -c 'import osm_ro; print osm_ro.__path__[0]'` || FATAL "osm-ro was not properly installed" && \
-       USER=root DEBIAN_FRONTEND=noninteractive $$OSMRO_PATH/database_utils/install-db-server.sh --updatedb || FATAL "osm-ro db installation failed" && \
-       USER=root DEBIAN_FRONTEND=noninteractive $$OSMLIBOVIM_PATH/database_utils/install-db-server.sh -u mano -p manopw -d mano_vim_db --updatedb || FATAL "lib-osm-openvim db installation failed"
-       service osm-ro restart
-
-develop: prepare
-#      pip install -r requirements.txt
-       cd build && ./setup.py develop
-
-test:
-       . ./test/basictest.sh -f --insert-bashrc --install-openvim --init-openvim
-       . ./test/basictest.sh -f reset add-openvim
-       ./test/test_RO.py deploy -n mgmt -t osm -i cirros034 -d local-openvim --timeout=30 --failfast
-       ./test/test_RO.py vim  -t osm  -d local-openvim --timeout=30 --failfast
-
-build-docker-from-source:
-       docker build -t osm/openmano -f docker/Dockerfile-local .
+SUBDIRS := $(wildcard */Makefile)
 
-run-docker:
-       docker-compose -f docker/openmano-compose.yml up
+all: clean package
+clean: $(SUBDIRS)
+package: $(SUBDIRS)
 
-stop-docker:
-       docker-compose -f docker/openmano-compose.yml down
+$(SUBDIRS):
+       $(MAKE) -C $(@:Makefile=) $(MAKECMDGOALS)
 
+.PHONY: all $(SUBDIRS)
 
diff --git a/README.rst b/README.rst
deleted file mode 100644 (file)
index 3a2be88..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-===========
-osm-ro
-===========
-
-osm-ro is the Resource Orchestrator for OSM, dealing with resource operations
-against different VIMs such as Openstack, VMware's vCloud Director, openvim
-and AWS.
-
diff --git a/RO-VIM-aws/Makefile b/RO-VIM-aws/Makefile
new file mode 100644 (file)
index 0000000..edf3eb7
--- /dev/null
@@ -0,0 +1,23 @@
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+# Default goal: rebuild the Debian package from a clean tree.
+all: clean package
+
+# Remove all artifacts left over from a previous package build.
+clean:
+	rm -rf dist deb_dist osm_rovim_aws-*.tar.gz osm_rovim_aws.egg-info .eggs
+
+# Generate the Debian source package with stdeb, then build the .deb unsigned.
+package:
+	python3 setup.py --command-packages=stdeb.command sdist_dsc
+	cd deb_dist/osm-rovim-aws*/ && dpkg-buildpackage -rfakeroot -uc -us
diff --git a/RO-VIM-aws/osm_rovim_aws/vimconn_aws.py b/RO-VIM-aws/osm_rovim_aws/vimconn_aws.py
new file mode 100644 (file)
index 0000000..8173662
--- /dev/null
@@ -0,0 +1,804 @@
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2017 xFlow Research Pvt. Ltd
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: saboor.ahmad@xflowresearch.com
+##
+
+'''
+AWS-connector implements all the methods to interact with AWS using the BOTO client
+'''
+
+__author__ = "Saboor Ahmad"
+__date__ = "10-Apr-2017"
+
+from osm_ro import vimconn
+import yaml
+import logging
+import netaddr
+import time
+
+import boto
+import boto.ec2
+import boto.vpc
+
+
+class vimconnector(vimconn.vimconnector):
+    def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None, log_level=None,
+                 config={}, persistent_info={}):
+        """ Params: uuid - id assigned to this VIM
+                name - name assigned to this VIM, can be used for logging
+                tenant_id - ID of the VIM tenant (VPC id) to be used
+                tenant_name - name of the VIM tenant to be used
+                url_admin - optional, url used for administrative tasks
+                user - credentials of the VIM user (used as the AWS access key id)
+                passwd - credentials of the VIM user (used as the AWS secret access key)
+                log_level - if must use a different log_level than the general one
+                config - dictionary with misc VIM information
+                    region_name - name of region to deploy the instances
+                    vpc_cidr_block - default CIDR block for VPC
+                    security_groups - default security group to specify this instance
+                persistent_info - dict where the class can store information that will be available among class
+                    destroy/creation cycles. This info is unique per VIM/credential. At first call it will contain an
+                    empty dict. Useful to store login/tokens information for speed up communication
+        Raises vimconnAuthException if user/passwd is missing and vimconnException
+        if 'region_name' is not present in config.
+        """
+
+        vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level,
+                                      config, persistent_info)
+
+        # NOTE(review): mutable default arguments (config={}, persistent_info={}) are
+        # shared across calls -- presumably callers always pass both; confirm upstream.
+        self.persistent_info = persistent_info
+        self.a_creds = {}
+        if user:
+            self.a_creds['aws_access_key_id'] = user
+        else:
+            raise vimconn.vimconnAuthException("Username is not specified")
+        if passwd:
+            self.a_creds['aws_secret_access_key'] = passwd
+        else:
+            raise vimconn.vimconnAuthException("Password is not specified")
+        if 'region_name' in config:
+            self.region = config.get('region_name')
+        else:
+            raise vimconn.vimconnException("AWS region_name is not specified at config")
+
+        self.vpc_data = {}      # cache of VPCs created via new_tenant: {vpc_id: {gateway, route_table, subnets}}
+        self.subnet_data = {}
+        self.conn = None        # boto EC2 connection, set by _reload_connection
+        self.conn_vpc = None    # boto VPC connection, set by _reload_connection
+        self.account_id = None
+
+        # default to the first VPC reported by AWS; overridden by tenant_id below when given
+        self.vpc_id = self.get_tenant_list()[0]['id']
+        # we take VPC CIDR block if specified, otherwise we use the default CIDR
+        # block suggested by AWS while creating instance
+        self.vpc_cidr_block = '10.0.0.0/24'
+
+        if tenant_id:
+            self.vpc_id = tenant_id
+        if 'vpc_cidr_block' in config:
+            self.vpc_cidr_block = config['vpc_cidr_block']
+
+        self.security_groups = None
+        if 'security_groups' in config:
+            self.security_groups = config['security_groups']
+
+        self.key_pair = None
+        if 'key_pair' in config:
+            self.key_pair = config['key_pair']
+
+        # flavor_info may be given inline as a dict/YAML string, or as "@<path>" to load from a file
+        self.flavor_info = None
+        if 'flavor_info' in config:
+            flavor_data = config.get('flavor_info')
+            if isinstance(flavor_data, str):
+                try:
+                    if flavor_data[0] == "@":  # read from a file
+                        with open(flavor_data[1:], 'r') as stream:
+                            self.flavor_info = yaml.load(stream, Loader=yaml.Loader)
+                    else:
+                        self.flavor_info = yaml.load(flavor_data, Loader=yaml.Loader)
+                except yaml.YAMLError as e:
+                    self.flavor_info = None
+                    raise vimconn.vimconnException("Bad format at file '{}': {}".format(flavor_data[1:], e))
+                except IOError as e:
+                    raise vimconn.vimconnException("Error reading file '{}': {}".format(flavor_data[1:], e))
+            elif isinstance(flavor_data, dict):
+                self.flavor_info = flavor_data
+
+        self.logger = logging.getLogger('openmano.vim.aws')
+        if log_level:
+            self.logger.setLevel(getattr(logging, log_level))
+
+    def __setitem__(self, index, value):
+        """Update a credential or region field after construction.
+        Params: index - name of the field to set ('user', 'passwd' or 'region')
+                value - value to set
+        Any other index is delegated to the parent vimconnector class.
+        """
+        if index == 'user':
+            self.a_creds['aws_access_key_id'] = value
+        elif index == 'passwd':
+            self.a_creds['aws_secret_access_key'] = value
+        elif index == 'region':
+            self.region = value
+        else:
+            vimconn.vimconnector.__setitem__(self, index, value)
+
+    def _reload_connection(self):
+        """(Re)create the boto EC2 and VPC connections for the configured region
+        using the stored credentials; sets self.conn and self.conn_vpc.
+        Raises vimconnConnectionException (via format_vimconn_exception) on failure.
+        """
+
+        try:
+            self.conn = boto.ec2.connect_to_region(self.region, aws_access_key_id=self.a_creds['aws_access_key_id'],
+                                                   aws_secret_access_key=self.a_creds['aws_secret_access_key'])
+            self.conn_vpc = boto.vpc.connect_to_region(self.region, aws_access_key_id=self.a_creds['aws_access_key_id'],
+                                                       aws_secret_access_key=self.a_creds['aws_secret_access_key'])
+            # client = boto3.client("sts", aws_access_key_id=self.a_creds['aws_access_key_id'], aws_secret_access_key=self.a_creds['aws_secret_access_key'])
+            # self.account_id = client.get_caller_identity()["Account"]
+        except Exception as e:
+            self.format_vimconn_exception(e)
+
+    def format_vimconn_exception(self, e):
+        """Params: an Exception object
+        Returns: Raises the exception 'e' passed in method parameters wrapped as a
+        vimconnConnectionException. Also drops the cached boto connections so the
+        next operation re-authenticates.
+        """
+
+        self.conn = None
+        self.conn_vpc = None
+        raise vimconn.vimconnConnectionException(type(e).__name__ + ": " + str(e))
+
+    def get_availability_zones_list(self):
+        """Return the list of availability-zone names of the configured AWS region.
+        Raises vimconnConnectionException (via format_vimconn_exception) on failure.
+        """
+
+        try:
+            self._reload_connection()
+            return [zone.name for zone in self.conn.get_all_zones()]
+        except Exception as e:
+            self.format_vimconn_exception(e)
+
+    def get_tenant_list(self, filter_dict={}):
+        """Obtain tenants of VIM. AWS has no tenant concept; VPCs are reported as tenants.
+        filter_dict dictionary that can contain the following keys:
+            name: filter by tenant name
+            id: filter by tenant uuid/id
+            <other VIM specific>
+        Returns the tenant list of dictionaries, and empty list if no tenant match all the filters:
+            [{'name':'<name>, 'id':'<id>, ...}, ...]
+        """
+
+        try:
+            self._reload_connection()
+            vpc_ids = []
+            tfilters = {}
+            if filter_dict != {}:
+                if 'id' in filter_dict:
+                    vpc_ids.append(filter_dict['id'])
+                    tfilters['name'] = filter_dict['id']
+            tenants = self.conn_vpc.get_all_vpcs(vpc_ids, tfilters)
+            tenant_list = []
+            # the VPC id doubles as the tenant name since VPCs have no name here
+            for tenant in tenants:
+                tenant_list.append({'id': str(tenant.id), 'name': str(tenant.id), 'status': str(tenant.state),
+                                    'cidr_block': str(tenant.cidr_block)})
+            return tenant_list
+        except Exception as e:
+            self.format_vimconn_exception(e)
+
+    def new_tenant(self, tenant_name, tenant_description):
+        """Adds a new tenant to VIM with this name and description, this is done using admin_url if provided
+        "tenant_name": string max length 64
+        "tenant_description": string max length 256
+        returns the tenant identifier or raise exception
+        Note: tenant_name/tenant_description are accepted for interface compatibility
+        but are not used; a new VPC (with internet gateway and default route) is created.
+        """
+
+        self.logger.debug("Adding a new VPC")
+        try:
+            self._reload_connection()
+            vpc = self.conn_vpc.create_vpc(self.vpc_cidr_block)
+            self.conn_vpc.modify_vpc_attribute(vpc.id, enable_dns_support=True)
+            self.conn_vpc.modify_vpc_attribute(vpc.id, enable_dns_hostnames=True)
+
+            # attach an internet gateway and route all outbound traffic through it
+            gateway = self.conn_vpc.create_internet_gateway()
+            self.conn_vpc.attach_internet_gateway(gateway.id, vpc.id)
+            route_table = self.conn_vpc.create_route_table(vpc.id)
+            self.conn_vpc.create_route(route_table.id, '0.0.0.0/0', gateway.id)
+
+            # remember the created resources and precompute the candidate subnet CIDRs
+            self.vpc_data[vpc.id] = {'gateway': gateway.id, 'route_table': route_table.id,
+                                     'subnets': self.subnet_sizes(len(self.get_availability_zones_list()),
+                                                                  self.vpc_cidr_block)}
+            return vpc.id
+        except Exception as e:
+            self.format_vimconn_exception(e)
+
+    def delete_tenant(self, tenant_id):
+        """Delete a tenant from VIM
+        tenant_id: returned VIM tenant_id on "new_tenant"
+        Returns None on success. Raises an exception on failure. If tenant is not found raises vimconnNotFoundException
+        """
+
+        self.logger.debug("Deleting specified VPC")
+        try:
+            self._reload_connection()
+            # vpc_data only holds VPCs created through new_tenant in this process;
+            # NOTE(review): vpc is None for other VPCs, so the 'in' test below raises
+            # TypeError, which is reported as a connection error -- confirm intended
+            vpc = self.vpc_data.get(tenant_id)
+            if 'gateway' in vpc and 'route_table' in vpc:
+                gateway_id, route_table_id = vpc['gateway'], vpc['route_table']
+                self.conn_vpc.detach_internet_gateway(gateway_id, tenant_id)
+                self.conn_vpc.delete_vpc(tenant_id)
+                # NOTE(review): the route is removed after the VPC is deleted and the
+                # detached gateway itself is never deleted -- confirm this ordering
+                self.conn_vpc.delete_route(route_table_id, '0.0.0.0/0')
+            else:
+                self.conn_vpc.delete_vpc(tenant_id)
+        except Exception as e:
+            self.format_vimconn_exception(e)
+
+    def subnet_sizes(self, availability_zones, cidr):
+        """Calculates possible subnets given CIDR value of VPC
+        Params:
+            'availability_zones': number of availability zones of the region (only 2 or 3 supported)
+            'cidr': CIDR block of the VPC, e.g. '10.0.0.0/24'
+        Returns a list of subnet CIDR strings carved out of the VPC block.
+        Raises vimconnNotSupportedException for an unsupported AZ count and
+        vimconnNotFoundException for an unsupported netmask.
+        """
+
+        if availability_zones != 2 and availability_zones != 3:
+            self.logger.debug("Number of AZs should be 2 or 3")
+            raise vimconn.vimconnNotSupportedException("Number of AZs should be 2 or 3")
+
+        netmasks = ('255.255.252.0', '255.255.254.0', '255.255.255.0', '255.255.255.128')
+        ip = netaddr.IPNetwork(cidr)
+        mask = ip.netmask
+
+        if str(mask) not in netmasks:
+            self.logger.debug("Netmask " + str(mask) + " not found")
+            raise vimconn.vimconnNotFoundException("Netmask " + str(mask) + " not found")
+
+        if availability_zones == 2:
+            # split the VPC block into equal subnets, one prefix step per netmask index
+            for n, netmask in enumerate(netmasks):
+                if str(mask) == netmask:
+                    subnets = list(ip.subnet(n + 24))
+        else:
+            for n, netmask in enumerate(netmasks):
+                if str(mask) == netmask:
+                    pub_net = list(ip.subnet(n + 24))
+                    pri_subs = pub_net[1:]
+                    pub_mask = pub_net[0].netmask
+            # reserve the first sub-block for the public subnets, split per AZ
+            pub_split = list(ip.subnet(26)) if (str(pub_mask) == '255.255.255.0') else list(ip.subnet(27))
+            pub_subs = pub_split[:3]
+            subnets = pub_subs + pri_subs
+
+        # Return a real list instead of a lazy map iterator: callers store this
+        # result (new_tenant keeps it in self.vpc_data['subnets']) and iterate it
+        # more than once; under python3 a map object is exhausted after one pass.
+        return [str(subnet) for subnet in subnets]
+
+    def new_network(self, net_name, net_type, ip_profile=None, shared=False, vlan=None):
+        """Adds a tenant network to VIM
+        Params:
+            'net_name': name of the network
+            'net_type': one of:
+                'bridge': overlay isolated network
+                'data':   underlay E-LAN network for Passthrough and SRIOV interfaces
+                'ptp':    underlay E-LINE network for Passthrough and SRIOV interfaces.
+            'ip_profile': is a dict containing the IP parameters of the network (Currently only IPv4 is implemented)
+                'ip-version': can be one of ["IPv4","IPv6"]
+                'subnet-address': ip_prefix_schema, that is X.X.X.X/Y
+                'gateway-address': (Optional) ip_schema, that is X.X.X.X
+                'dns-address': (Optional) ip_schema,
+                'dhcp': (Optional) dict containing
+                    'enabled': {"type": "boolean"},
+                    'start-address': ip_schema, first IP to grant
+                    'count': number of IPs to grant.
+            'shared': if this network can be seen/use by other tenants/organization
+            'vlan': in case of a data or ptp net_type, the intended vlan tag to be used for the network
+        Returns a tuple with the network identifier and created_items, or raises an exception on error
+            created_items can be None or a dictionary where this method can include key-values that will be passed to
+            the method delete_network. Can be used to store created segments, created l2gw connections, etc.
+            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+            as not present.
+        Note: all parameters are accepted for interface compatibility but none is used
+        by this driver; a subnet is simply created inside the configured VPC.
+        """
+
+        self.logger.debug("Adding a subnet to VPC")
+        try:
+            created_items = {}
+            self._reload_connection()
+            subnet = None
+            vpc_id = self.vpc_id
+            # pick any candidate CIDR block of the VPC not already used by an existing
+            # subnet; NOTE(review): set-difference ordering is arbitrary, so the chosen
+            # block is non-deterministic -- confirm that is acceptable
+            if self.vpc_data.get(vpc_id, None):
+                cidr_block = list(set(self.vpc_data[vpc_id]['subnets']) - set(self.get_network_details({'tenant_id': vpc_id}, detail='cidr_block')))[0]
+            else:
+                vpc = self.get_tenant_list({'id': vpc_id})[0]
+                subnet_list = self.subnet_sizes(len(self.get_availability_zones_list()), vpc['cidr_block'])
+                cidr_block = list(set(subnet_list) - set(self.get_network_details({'tenant_id': vpc['id']}, detail='cidr_block')))[0]
+            subnet = self.conn_vpc.create_subnet(vpc_id, cidr_block)
+            return subnet.id, created_items
+        except Exception as e:
+            self.format_vimconn_exception(e)
+
+    def get_network_details(self, filters, detail):
+        """Return the value of the *detail* field for every subnet matching *filters*.
+        Params: filters - filter dict forwarded to get_network_list
+                detail  - key to extract from each returned network dict
+        """
+        return [subnet[detail] for subnet in self.get_network_list(filters)]
+
+    def get_network_list(self, filter_dict={}):
+        """Obtain tenant networks of VIM
+        Params:
+            'filter_dict' (optional) contains entries to return only networks that matches ALL entries:
+                name: string  => returns only networks with this name
+                id:   string  => returns networks with this VIM id, this imply returns one network at most
+                shared: boolean => returns only networks that are (or are not) shared
+                tenant_id: string => returns only networks that belong to this tenant/project
+                ,#(not used yet) admin_state_up: boolean => returns only networks that are (or are not) in admin state active
+                #(not used yet) status: 'ACTIVE','ERROR',... => filter networks that are on this status
+        Returns the network list of dictionaries. each dictionary contains:
+            'id': (mandatory) VIM network id
+            'name': (mandatory) VIM network name
+            'status': (mandatory) can be 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
+            'error_msg': (optional) text that explains the ERROR status
+            other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
+        List can be empty if no network map the filter_dict. Raise an exception only upon VIM connectivity,
+            authorization, or some other unspecific error
+        """
+
+        self.logger.debug("Getting all subnets from VIM")
+        try:
+            self._reload_connection()
+            tfilters = {}
+            if filter_dict != {}:
+                if 'tenant_id' in filter_dict:
+                    tfilters['vpcId'] = filter_dict['tenant_id']
+            # 'name' carries the subnet id here since this driver reports name == id
+            subnets = self.conn_vpc.get_all_subnets(subnet_ids=filter_dict.get('name', None), filters=tfilters)
+            net_list = []
+            for net in subnets:
+                net_list.append(
+                    {'id': str(net.id), 'name': str(net.id), 'status': str(net.state), 'vpc_id': str(net.vpc_id),
+                     'cidr_block': str(net.cidr_block), 'type': 'bridge'})
+            return net_list
+        except Exception as e:
+            self.format_vimconn_exception(e)
+
+    def get_network(self, net_id):
+        """Obtain network details from the 'net_id' VIM network
+        Return a dict that contains:
+            'id': (mandatory) VIM network id, that is, net_id
+            'name': (mandatory) VIM network name
+            'status': (mandatory) can be 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
+            'error_msg': (optional) text that explains the ERROR status
+            other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
+        Raises an exception upon error or when network is not found
+        """
+
+        self.logger.debug("Getting Subnet from VIM")
+        try:
+            self._reload_connection()
+            # an empty result raises IndexError, reported via format_vimconn_exception
+            subnet = self.conn_vpc.get_all_subnets(net_id)[0]
+            return {'id': str(subnet.id), 'name': str(subnet.id), 'status': str(subnet.state),
+                    'vpc_id': str(subnet.vpc_id), 'cidr_block': str(subnet.cidr_block)}
+        except Exception as e:
+            self.format_vimconn_exception(e)
+
+    def delete_network(self, net_id, created_items=None):
+        """
+        Removes a tenant network from VIM and its associated elements
+        :param net_id: VIM identifier of the network, provided by method new_network
+        :param created_items: dictionary with extra items to be deleted. provided by method new_network
+            (this driver's new_network returns an empty dict, so nothing extra is cleaned up here)
+        Returns the network identifier or raises an exception upon error or when network is not found
+        """
+
+        self.logger.debug("Deleting subnet from VIM")
+        try:
+            self._reload_connection()
+            self.logger.debug("DELETING NET_ID: " + str(net_id))
+            self.conn_vpc.delete_subnet(net_id)
+            return net_id
+        except Exception as e:
+            self.format_vimconn_exception(e)
+
+    def refresh_nets_status(self, net_list):
+        """Get the status of the networks
+        Params:
+            'net_list': a list with the VIM network id to be get the status
+        Returns a dictionary with:
+            'net_id':         #VIM id of this network
+                status:     #Mandatory. Text with one of:
+                    #  DELETED (not found at vim)
+                    #  VIM_ERROR (Cannot connect to VIM, authentication problems, VIM response error, ...)
+                    #  OTHER (Vim reported other status not understood)
+                    #  ERROR (VIM indicates an ERROR status)
+                    #  ACTIVE, INACTIVE, DOWN (admin down),
+                    #  BUILD (on building process)
+                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
+                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
+            'net_id2': ...
+        """
+
+        self._reload_connection()
+        try:
+            dict_entry = {}
+            for net_id in net_list:
+                subnet_dict = {}
+                subnet = None
+                try:
+                    subnet = self.conn_vpc.get_all_subnets(net_id)[0]
+                    # map the AWS subnet state onto the generic status vocabulary
+                    if subnet.state == "pending":
+                        subnet_dict['status'] = "BUILD"
+                    elif subnet.state == "available":
+                        subnet_dict['status'] = 'ACTIVE'
+                    else:
+                        subnet_dict['status'] = 'ERROR'
+                    subnet_dict['error_msg'] = ''
+                except Exception as e:
+                    # any failure (including an empty result) is treated as a deleted network
+                    subnet_dict['status'] = 'DELETED'
+                    subnet_dict['error_msg'] = 'Network not found'
+                finally:
+                    try:
+                        # the boto subnet object may not be YAML-serializable; fall back to str()
+                        subnet_dict['vim_info'] = yaml.safe_dump(subnet, default_flow_style=True, width=256)
+                    except yaml.YAMLError as e:
+                        subnet_dict['vim_info'] = str(subnet)
+                dict_entry[net_id] = subnet_dict
+            return dict_entry
+        except Exception as e:
+            self.format_vimconn_exception(e)
+
+    def get_flavor(self, flavor_id):
+        """Obtain flavor details from the VIM
+        Returns the flavor dict details {'id':<>, 'name':<>, other vim specific }
+        Raises an exception upon error or if not found
+        """
+
+        self.logger.debug("Getting instance type")
+        try:
+            # NOTE(review): self.flavor_info may be None when no 'flavor_info' was
+            # configured; the 'in' test then raises TypeError. Also, the
+            # vimconnNotFoundException raised below is caught by the broad except and
+            # re-raised as a connection error -- confirm both behaviors are intended.
+            if flavor_id in self.flavor_info:
+                return self.flavor_info[flavor_id]
+            else:
+                raise vimconn.vimconnNotFoundException("Cannot find flavor with this flavor ID/Name")
+        except Exception as e:
+            self.format_vimconn_exception(e)
+
+    def get_flavor_id_from_data(self, flavor_dict):
+        """Obtain flavor id that match the flavor description
+        Params:
+            'flavor_dict': dictionary that contains:
+                'disk': main hard disk in GB
+                'ram': memory in MB
+                'vcpus': number of virtual cpus
+                #todo: complete parameters for EPA
+        Returns the flavor_id or raises a vimconnNotFoundException
+        """
+
+        self.logger.debug("Getting flavor id from data")
+        try:
+            flavor = None
+            for key, values in self.flavor_info.items():
+                # exact (ram, cpus, disk) match wins immediately
+                if (values["ram"], values["cpus"], values["disk"]) == (
+                flavor_dict["ram"], flavor_dict["vcpus"], flavor_dict["disk"]):
+                    flavor = (key, values)
+                    break
+                # otherwise keep the smallest flavor that is >= the request;
+                # NOTE(review): tuple comparison is lexicographic (ram dominates,
+                # cpus/disk only break ties) -- confirm that is the intended ordering
+                elif (values["ram"], values["cpus"], values["disk"]) >= (
+                flavor_dict["ram"], flavor_dict["vcpus"], flavor_dict["disk"]):
+                    if not flavor:
+                        flavor = (key, values)
+                    else:
+                        if (flavor[1]["ram"], flavor[1]["cpus"], flavor[1]["disk"]) >= (
+                        values["ram"], values["cpus"], values["disk"]):
+                            flavor = (key, values)
+            if flavor:
+                return flavor[0]
+            # NOTE(review): this NotFound is caught by the broad except below and
+            # re-raised as a connection error -- confirm intended
+            raise vimconn.vimconnNotFoundException("Cannot find flavor with this flavor ID/Name")
+        except Exception as e:
+            self.format_vimconn_exception(e)
+
+    def new_image(self, image_dict):
+        """ Adds a tenant image to VIM
+        Params: image_dict
+                    name (string) - The name of the AMI. Valid only for EBS-based images.
+                    description (string) - The description of the AMI.
+                    image_location (string) - Full path to your AMI manifest in Amazon S3 storage. Only used for S3-based AMI’s.
+                    architecture (string) - The architecture of the AMI. Valid choices are: * i386 * x86_64
+                    kernel_id (string) -  The ID of the kernel with which to launch the instances
+                    root_device_name (string) - The root device name (e.g. /dev/sdh)
+                    block_device_map (boto.ec2.blockdevicemapping.BlockDeviceMapping) - A BlockDeviceMapping data structure describing the EBS volumes associated with the Image.
+                    virtualization_type (string) - The virtualization_type of the image. Valid choices are: * paravirtual * hvm
+                    sriov_net_support (string) - Advanced networking support. Valid choices are: * simple
+                    snapshot_id (string) - A snapshot ID for the snapshot to be used as root device for the image. Mutually exclusive with block_device_map, requires root_device_name
+                    delete_root_volume_on_termination (bool) - Whether to delete the root volume of the image after instance termination. Only applies when creating image from snapshot_id. Defaults to False. Note that leaving volumes behind after instance termination is not free
+        Returns: image_id - image ID of the newly created image
+        """
+
+        try:
+            self._reload_connection()
+            image_location = image_dict.get('image_location', None)
+            # NOTE(review): self.account_id is only set by the commented-out boto3 code
+            # in _reload_connection, so it is always None here -- confirm the prefix
+            if image_location:
+                image_location = str(self.account_id) + str(image_location)
+
+            image_id = self.conn.register_image(image_dict.get('name', None), image_dict.get('description', None),
+                                                image_location, image_dict.get('architecture', None),
+                                                image_dict.get('kernel_id', None),
+                                                image_dict.get('root_device_name', None),
+                                                image_dict.get('block_device_map', None),
+                                                image_dict.get('virtualization_type', None),
+                                                image_dict.get('sriov_net_support', None),
+                                                image_dict.get('snapshot_id', None),
+                                                image_dict.get('delete_root_volume_on_termination', None))
+            return image_id
+        except Exception as e:
+            self.format_vimconn_exception(e)
+
+    def delete_image(self, image_id):
+        """Deletes a tenant image from VIM
+        Returns the image_id if image is deleted or raises an exception on error
+        NOTE(review): only the AMI registration is removed; backing snapshots are
+        presumably left behind -- confirm whether they should also be deleted.
+        """
+
+        try:
+            self._reload_connection()
+            self.conn.deregister_image(image_id)
+            return image_id
+        except Exception as e:
+            self.format_vimconn_exception(e)
+
+    def get_image_id_from_path(self, path):
+        '''
+        Params: path - location of the image, formatted as '<owner_id>/<image name>'
+        Returns: image_id - ID of the matching image
+        Raises an exception (via format_vimconn_exception) when no image matches.
+        '''
+        self._reload_connection()
+        try:
+            filters = {}
+            if path:
+                # first token is the AWS account (owner) id, the remainder is the name
+                tokens = path.split('/')
+                filters['owner_id'] = tokens[0]
+                filters['name'] = '/'.join(tokens[1:])
+            image = self.conn.get_all_images(filters=filters)[0]
+            return image.id
+        except Exception as e:
+            self.format_vimconn_exception(e)
+
+    def get_image_list(self, filter_dict={}):
+        """Obtain tenant images from VIM
+        Filter_dict can be:
+            name: image name
+            id: image uuid
+            checksum: image checksum (accepted but ignored by this driver)
+            location: image path
+        Returns the image list of dictionaries:
+            [{<the fields at Filter_dict plus some VIM specific>}, ...]
+            List can be empty
+        """
+
+        self.logger.debug("Getting image list from VIM")
+        try:
+            self._reload_connection()
+            image_id = None
+            filters = {}
+            if 'id' in filter_dict:
+                image_id = filter_dict['id']
+            if 'name' in filter_dict:
+                filters['name'] = filter_dict['name']
+            if 'location' in filter_dict:
+                filters['location'] = filter_dict['location']
+            # filters['image_type'] = 'machine'
+            # filter_dict['owner_id'] = self.account_id
+            images = self.conn.get_all_images(image_id, filters=filters)
+            image_list = []
+            for image in images:
+                image_list.append({'id': str(image.id), 'name': str(image.name), 'status': str(image.state),
+                                   'owner': str(image.owner_id), 'location': str(image.location),
+                                   'is_public': str(image.is_public), 'architecture': str(image.architecture),
+                                   'platform': str(image.platform)})
+            return image_list
+        except Exception as e:
+            self.format_vimconn_exception(e)
+
+    def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None,
+                       disk_list=None, availability_zone_index=None, availability_zone_list=None):
+        """Create a new VM/instance in AWS
+        Params: name
+                decription
+                start: (boolean) indicates if VM must start or created in pause mode.
+                image_id - image ID in AWS
+                flavor_id - instance type ID in AWS
+                net_list
+                    name
+                    net_id - subnet_id from AWS
+                    vpci - (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities
+                    model: (optional and only have sense for type==virtual) interface model: virtio, e1000, ...
+                    mac_address: (optional) mac address to assign to this interface
+                    type: (mandatory) can be one of:
+                        virtual, in this case always connected to a network of type 'net_type=bridge'
+                        'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp network ot it
+                           can created unconnected
+                        'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
+                        VFnotShared - (SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
+                            are allocated on the same physical NIC
+                    bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
+                    port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing or True, it must apply the default VIM behaviour
+                    vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this interface. 'net_list' is modified
+                    elastic_ip - True/False to define if an elastic_ip is required
+                cloud_config': (optional) dictionary with:
+                    key-pairs': (optional) list of strings with the public key to be inserted to the default user
+                    users': (optional) list of users to be inserted, each item is a dict with:
+                        name': (mandatory) user name,
+                        key-pairs': (optional) list of strings with the public key to be inserted to the user
+                    user-data': (optional) string is a text script to be passed directly to cloud-init
+                    config-files': (optional). List of files to be transferred. Each item is a dict with:
+                        dest': (mandatory) string with the destination absolute path
+                        encoding': (optional, by default text). Can be one of:
+                            b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
+                        content' (mandatory): string with the content of the file
+                        permissions': (optional) string with file permissions, typically octal notation '0644'
+                        owner: (optional) file owner, string with the format 'owner:group'
+                    boot-data-drive: boolean to indicate if user-data must be passed using a boot drive (hard disk)
+                    security-groups:
+                        subnet_id
+                        security_group_id
+                disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
+                    image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
+                    size': (mandatory) string with the size of the disk in GB
+        Returns a tuple with the instance identifier and created_items or raises an exception on error
+            created_items can be None or a dictionary where this method can include key-values that will be passed to
+            the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
+            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+            as not present.
+        """
+
+        self.logger.debug("Creating a new VM instance")
+        try:
+            self._reload_connection()
+            instance = None
+            _, userdata = self._create_user_data(cloud_config)
+
+            if not net_list:
+                reservation = self.conn.run_instances(
+                    image_id,
+                    key_name=self.key_pair,
+                    instance_type=flavor_id,
+                    security_groups=self.security_groups,
+                    user_data=userdata
+                )
+            else:
+                for index, subnet in enumerate(net_list):
+                    net_intr = boto.ec2.networkinterface.NetworkInterfaceSpecification(subnet_id=subnet.get('net_id'),
+                                                                                       groups=None,
+                                                                                       associate_public_ip_address=True)
+
+                    if subnet.get('elastic_ip'):
+                        eip = self.conn.allocate_address()
+                        self.conn.associate_address(allocation_id=eip.allocation_id, network_interface_id=net_intr.id)
+
+                    if index == 0:
+                        reservation = self.conn.run_instances(
+                            image_id,
+                            key_name=self.key_pair,
+                            instance_type=flavor_id,
+                            security_groups=self.security_groups,
+                            network_interfaces=boto.ec2.networkinterface.NetworkInterfaceCollection(net_intr),
+                            user_data=userdata
+                        )
+                    else:
+                        while True:
+                            try:
+                                self.conn.attach_network_interface(
+                                    network_interface_id=boto.ec2.networkinterface.NetworkInterfaceCollection(net_intr),
+                                    instance_id=instance.id, device_index=0)
+                                break
+                            except:
+                                time.sleep(10)
+                    net_list[index]['vim_id'] = reservation.instances[0].interfaces[index].id
+
+            instance = reservation.instances[0]
+            return instance.id, None
+        except Exception as e:
+            self.format_vimconn_exception(e)
+
+    def get_vminstance(self, vm_id):
+        """Returns the VM instance information from VIM"""
+
+        try:
+            self._reload_connection()
+            reservation = self.conn.get_all_instances(vm_id)
+            return reservation[0].instances[0].__dict__
+        except Exception as e:
+            self.format_vimconn_exception(e)
+
+    def delete_vminstance(self, vm_id, created_items=None):
+        """Removes a VM instance from VIM
+        Returns the instance identifier"""
+
+        try:
+            self._reload_connection()
+            self.logger.debug("DELETING VM_ID: " + str(vm_id))
+            self.conn.terminate_instances(vm_id)
+            return vm_id
+        except Exception as e:
+            self.format_vimconn_exception(e)
+
+    def refresh_vms_status(self, vm_list):
+        """ Get the status of the virtual machines and their interfaces/ports
+        Params: the list of VM identifiers
+        Returns a dictionary with:
+            vm_id:          #VIM id of this Virtual Machine
+                status:     #Mandatory. Text with one of:
+                            #  DELETED (not found at vim)
+                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+                            #  OTHER (Vim reported other status not understood)
+                            #  ERROR (VIM indicates an ERROR status)
+                            #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
+                            #  BUILD (on building process), ERROR
+                            #  ACTIVE:NoMgmtIP (Active but none of its interfaces has an IP address)
+                            #
+                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
+                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
+                interfaces: list with interface info. Each item a dictionary with:
+                    vim_interface_id -  The ID of the ENI.
+                    vim_net_id - The ID of the VPC subnet.
+                    mac_address - The MAC address of the interface.
+                    ip_address - The IP address of the interface within the subnet.
+        """
+        self.logger.debug("Getting VM instance information from VIM")
+        try:
+            self._reload_connection()
+            reservation = self.conn.get_all_instances(vm_list)[0]
+            instances = {}
+            instance_dict = {}
+            for instance in reservation.instances:
+                try:
+                    if instance.state in ("pending"):
+                        instance_dict['status'] = "BUILD"
+                    elif instance.state in ("available", "running", "up"):
+                        instance_dict['status'] = 'ACTIVE'
+                    else:
+                        instance_dict['status'] = 'ERROR'
+                    instance_dict['error_msg'] = ""
+                    instance_dict['interfaces'] = []
+                    interface_dict = {}
+                    for interface in instance.interfaces:
+                        interface_dict['vim_interface_id'] = interface.id
+                        interface_dict['vim_net_id'] = interface.subnet_id
+                        interface_dict['mac_address'] = interface.mac_address
+                        if hasattr(interface, 'publicIp') and interface.publicIp != None:
+                            interface_dict['ip_address'] = interface.publicIp + ";" + interface.private_ip_address
+                        else:
+                            interface_dict['ip_address'] = interface.private_ip_address
+                        instance_dict['interfaces'].append(interface_dict)
+                except Exception as e:
+                    self.logger.error("Exception getting vm status: %s", str(e), exc_info=True)
+                    instance_dict['status'] = "DELETED"
+                    instance_dict['error_msg'] = str(e)
+                finally:
+                    try:
+                        instance_dict['vim_info'] = yaml.safe_dump(instance, default_flow_style=True, width=256)
+                    except yaml.YAMLError as e:
+                        # self.logger.error("Exception getting vm status: %s", str(e), exc_info=True)
+                        instance_dict['vim_info'] = str(instance)
+                instances[instance.id] = instance_dict
+            return instances
+        except Exception as e:
+            self.logger.error("Exception getting vm status: %s", str(e), exc_info=True)
+            self.format_vimconn_exception(e)
+
+    def action_vminstance(self, vm_id, action_dict, created_items={}):
+        """Send an action over a VM instance from VIM
+        Returns the vm_id if the action was successfully sent to the VIM"""
+
+        self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
+        try:
+            self._reload_connection()
+            if "start" in action_dict:
+                self.conn.start_instances(vm_id)
+            elif "stop" in action_dict or "shutdown" in action_dict:
+                self.conn.stop_instances(vm_id)
+            elif "terminate" in action_dict:
+                self.conn.terminate_instances(vm_id)
+            elif "reboot" in action_dict:
+                self.conn.reboot_instances(vm_id)
+            return None
+        except Exception as e:
+            self.format_vimconn_exception(e)
diff --git a/RO-VIM-aws/requirements.txt b/RO-VIM-aws/requirements.txt
new file mode 100644 (file)
index 0000000..ed019dc
--- /dev/null
@@ -0,0 +1,21 @@
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+PyYAML
+requests
+netaddr
+boto
+git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro
+
diff --git a/RO-VIM-aws/setup.py b/RO-VIM-aws/setup.py
new file mode 100644 (file)
index 0000000..30b90bd
--- /dev/null
@@ -0,0 +1,55 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from setuptools import setup
+
+_name = "osm_rovim_aws"
+
+README = """
+=============
+osm-rovim_aws
+=============
+
+osm-ro plugin for aws VIM
+"""
+
+setup(
+    name=_name,
+    description='OSM ro vim plugin for aws',
+    long_description=README,
+    version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'),
+    # version=VERSION,
+    # python_requires='>3.5.0',
+    author='ETSI OSM',
+    # TODO py3 author_email='',
+    maintainer='OSM_TECH@LIST.ETSI.ORG',  # TODO py3
+    # TODO py3 maintainer_email='',
+    url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary',
+    license='Apache 2.0',
+
+    packages=[_name],
+    include_package_data=True,
+    dependency_links=["git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro"],
+    install_requires=[
+        "requests", "netaddr", "PyYAML", "osm-ro", "boto"
+    ],
+    setup_requires=['setuptools-version-command'],
+    entry_points={
+        'osm_rovim.plugins': ['rovim_aws = osm_rovim_aws.vimconn_aws'],
+    },
+)
diff --git a/RO-VIM-aws/stdeb.cfg b/RO-VIM-aws/stdeb.cfg
new file mode 100644 (file)
index 0000000..2193709
--- /dev/null
@@ -0,0 +1,18 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+[DEFAULT]
+X-Python3-Version : >= 3.5
+Depends3: python3-boto, python3-requests, python3-netaddr, python3-yaml, python3-osm-ro
diff --git a/RO-VIM-aws/tox.ini b/RO-VIM-aws/tox.ini
new file mode 100644 (file)
index 0000000..067b0d4
--- /dev/null
@@ -0,0 +1,41 @@
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+[tox]
+envlist = py3
+toxworkdir={homedir}/.tox
+
+[testenv]
+basepython = python3
+install_command = python3 -m pip install -r requirements.txt -U {opts} {packages}
+# deps = -r{toxinidir}/test-requirements.txt
+commands=python3 -m unittest discover -v
+
+[testenv:flake8]
+basepython = python3
+deps = flake8
+commands = flake8 osm_rovim_aws --max-line-length 120 \
+    --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504
+
+[testenv:unittest]
+basepython = python3
+commands = python3 -m unittest osm_rovim_aws.tests
+
+[testenv:build]
+basepython = python3
+deps = stdeb
+       setuptools-version-command
+commands = python3 setup.py --command-packages=stdeb.command bdist_deb
+
diff --git a/RO-VIM-azure/Makefile b/RO-VIM-azure/Makefile
new file mode 100644 (file)
index 0000000..d5b779a
--- /dev/null
@@ -0,0 +1,25 @@
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+all: clean package
+
+clean:
+       rm -rf dist deb_dist osm_rovim_azure-*.tar.gz osm_rovim_azure.egg-info .eggs
+
+package:
+       python3 setup.py --command-packages=stdeb.command sdist_dsc
+       cp debian/python3-osm-rovim-azure.postinst deb_dist/osm-rovim-azure*/debian/
+       cd deb_dist/osm-rovim-azure*/ && dpkg-buildpackage -rfakeroot -uc -us
+
diff --git a/RO-VIM-azure/debian/python3-osm-rovim-azure.postinst b/RO-VIM-azure/debian/python3-osm-rovim-azure.postinst
new file mode 100755 (executable)
index 0000000..ebb69b1
--- /dev/null
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: OSM_TECH@list.etsi.org
+##
+
+echo "POST INSTALL OSM-ROVIM-AZURE"
+
+#Pip packages required for azure connector
+python3 -m pip install azure
+
diff --git a/RO-VIM-azure/osm_rovim_azure/vimconn_azure.py b/RO-VIM-azure/osm_rovim_azure/vimconn_azure.py
new file mode 100755 (executable)
index 0000000..0cc143f
--- /dev/null
@@ -0,0 +1,495 @@
+# -*- coding: utf-8 -*-
+
+__author__='Sergio Gonzalez'
+__date__ ='$18-apr-2019 23:59:59$'
+
+from osm_ro import vimconn
+import logging
+
+from os import getenv
+from uuid import uuid4
+
+from azure.common.credentials import ServicePrincipalCredentials
+from azure.mgmt.resource import ResourceManagementClient
+from azure.mgmt.network import NetworkManagementClient
+from azure.mgmt.compute import ComputeManagementClient
+
+
+class vimconnector(vimconn.vimconnector):
+
+    def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None, log_level=None,
+                 config={}, persistent_info={}):
+
+        vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level,
+                                      config, persistent_info)
+
+        # LOGGER
+        self.logger = logging.getLogger('openmano.vim.azure')
+        if log_level:
+            logging.basicConfig()
+            self.logger.setLevel(getattr(logging, log_level))
+
+        # CREDENTIALS 
+        self.credentials = ServicePrincipalCredentials(
+            client_id=user,
+            secret=passwd,
+            tenant=(tenant_id or tenant_name)
+        )
+
+        # SUBSCRIPTION
+        if 'subscription_id' in config:
+            self.subscription_id = config.get('subscription_id')
+            self.logger.debug('Setting subscription '+str(self.subscription_id))
+        else:
+            raise vimconn.vimconnException('Subscription not specified')
+        # REGION
+        if 'region_name' in config:
+            self.region = config.get('region_name')
+        else:
+            raise vimconn.vimconnException('Azure region_name is not specified at config')
+        # RESOURCE_GROUP
+        if 'resource_group' in config:
+            self.resource_group = config.get('resource_group')
+        else:
+            raise vimconn.vimconnException('Azure resource_group is not specified at config')
+        # VNET_NAME
+        if 'vnet_name' in config:
+            self.vnet_name = config["vnet_name"]
+            
+        # public ssh key
+        self.pub_key = config.get('pub_key')
+            
+    def _reload_connection(self):
+        """
+        Sets connections to work with Azure service APIs
+        :return:
+        """
+        self.logger.debug('Reloading API Connection')
+        try:
+            self.conn = ResourceManagementClient(self.credentials, self.subscription_id)
+            self.conn_compute = ComputeManagementClient(self.credentials, self.subscription_id)
+            self.conn_vnet = NetworkManagementClient(self.credentials, self.subscription_id)
+            self._check_or_create_resource_group()
+            self._check_or_create_vnet()
+        except Exception as e:
+            self.format_vimconn_exception(e)            
+
+    def _get_resource_name_from_resource_id(self, resource_id):
+        return str(resource_id.split('/')[-1])
+
+    def _get_location_from_resource_group(self, resource_group_name):
+        return self.conn.resource_groups.get(resource_group_name).location
+        
+    def _get_resource_group_name_from_resource_id(self, resource_id):
+        return str(resource_id.split('/')[4])
+
+    def _check_subnets_for_vm(self, net_list):
+        # All subnets must belong to the same resource group and vnet
+        if len(set(self._get_resource_group_name_from_resource_id(net['id']) +
+                   self._get_resource_name_from_resource_id(net['id']) for net in net_list)) != 1:
+            raise self.format_vimconn_exception('Azure VMs can only attach to subnets in same VNET')
+
+    def format_vimconn_exception(self, e):
+        """
+        Params: an Exception object
+        :param e:
+        :return: Raises the proper vimconnException
+        """
+        self.conn = None
+        self.conn_vnet = None
+        raise vimconn.vimconnConnectionException(type(e).__name__ + ': ' + str(e))        
+
+    def _check_or_create_resource_group(self):
+        """
+        Creates a resource group in indicated region
+        :return: None
+        """
+        self.logger.debug('Creating RG {} in location {}'.format(self.resource_group, self.region))
+        self.conn.resource_groups.create_or_update(self.resource_group, {'location': self.region})
+
+    def _check_or_create_vnet(self):
+        try:
+            vnet_params = {
+                'location': self.region,
+                'address_space': {
+                    'address_prefixes': "10.0.0.0/8"
+                },
+            }
+            self.conn_vnet.virtual_networks.create_or_update(self.resource_group, self.vnet_name, vnet_params)
+        except Exception as e:
+            self.format_vimconn_exception(e)
+
+    def new_network(self, net_name, net_type, ip_profile=None, shared=False, vlan=None):
+        """
+        Adds a tenant network to VIM
+        :param net_name: name of the network
+        :param net_type:
+        :param ip_profile: is a dict containing the IP parameters of the network (Currently only IPv4 is implemented)
+                'ip-version': can be one of ['IPv4','IPv6']
+                'subnet-address': ip_prefix_schema, that is X.X.X.X/Y
+                'gateway-address': (Optional) ip_schema, that is X.X.X.X
+                'dns-address': (Optional) ip_schema,
+                'dhcp': (Optional) dict containing
+                    'enabled': {'type': 'boolean'},
+                    'start-address': ip_schema, first IP to grant
+                    'count': number of IPs to grant.
+        :param shared:
+        :param vlan:
+        :return: a tuple with the network identifier and created_items, or raises an exception on error
+            created_items can be None or a dictionary where this method can include key-values that will be passed to
+            the method delete_network. Can be used to store created segments, created l2gw connections, etc.
+            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+            as not present.
+        """
+
+        return self._new_subnet(net_name, ip_profile)
+
+    def _new_subnet(self, net_name, ip_profile):
+        """
+        Adds a tenant network to VIM. It creates a new VNET with a single subnet
+        :param net_name:
+        :param ip_profile:
+        :return:
+        """
+        self.logger.debug('Adding a subnet to VNET '+self.vnet_name)
+        self._reload_connection()
+
+        if ip_profile is None:
+            # TODO get a non used vnet ip range /24 and allocate automatically
+            raise vimconn.vimconnException('Azure cannot create VNET with no CIDR')
+
+        try:
+            vnet_params= {
+                'location': self.region,
+                'address_space': {
+                    'address_prefixes': [ip_profile['subnet_address']]
+                },
+                'subnets': [
+                    {
+                        'name': "{}-{}".format(net_name[:24], uuid4()),
+                        'address_prefix': ip_profile['subnet_address']
+                    }
+                ]
+            }
+            self.conn_vnet.virtual_networks.create_or_update(self.resource_group, self.vnet_name, vnet_params)
+            # TODO return a tuple (subnet-ID, None)
+        except Exception as e:
+            self.format_vimconn_exception(e)
+
+    def _create_nic(self, subnet_id, nic_name, static_ip=None):
+        self._reload_connection()
+        
+        resource_group_name=self._get_resource_group_name_from_resource_id(subnet_id)
+        location = self._get_location_from_resource_group(resource_group_name)
+            
+        if static_ip:
+            async_nic_creation = self.conn_vnet.network_interfaces.create_or_update(
+                resource_group_name,
+                nic_name,
+                {
+                    'location': location,
+                    'ip_configurations': [{
+                        'name': nic_name + 'ipconfiguration',
+                        'privateIPAddress': static_ip,
+                        'privateIPAllocationMethod': 'Static',
+                        'subnet': {
+                            'id': subnet_id
+                        }
+                    }]
+                }
+            )
+        else:
+            async_nic_creation = self.conn_vnet.network_interfaces.create_or_update(
+                resource_group_name,
+                nic_name,
+                {
+                    'location': location,
+                    'ip_configurations': [{
+                        'name': nic_name + 'ipconfiguration',
+                        'subnet': {
+                            'id': subnet_id
+                        }
+                    }]
+                }
+            )
+
+        return async_nic_creation.result()
+
+    def get_image_list(self, filter_dict={}):
+        """
+        For marketplace images the URN has the form 'publisher:offer:sku:version'
+
+        :param filter_dict:
+        :return:
+        """
+        image_list = []
+
+        self._reload_connection()
+        if filter_dict.get("name"):
+            params = filter_dict["name"].split(":")
+            if len(params) >= 3:
+                publisher = params[0]
+                offer = params[1]
+                sku = params[2]
+                version = None
+                if len(params) == 4:
+                    version = params[3]
+                images = self.conn_compute.virtual_machine_images.list(self.region, publisher, offer, sku)
+                for image in images:
+                    if version:
+                        image_version = str(image.id).split("/")[-1]
+                        if image_version != version:
+                            continue
+                    image_list.append({
+                        'id': str(image.id),
+                        'name': self._get_resource_name_from_resource_id(image.id)
+                    })
+                return image_list
+
+        images = self.conn_compute.virtual_machine_images.list()
+
+        for image in images:
+            # TODO implement filter_dict
+            if filter_dict:
+                if filter_dict.get("id") and str(image.id) != filter_dict["id"]:
+                    continue
+                if filter_dict.get("name") and \
+                        self._get_resource_name_from_resource_id(image.id) != filter_dict["name"]:
+                    continue
+                # TODO add checksum
+            image_list.append({
+                'id': str(image.id),
+                'name': self._get_resource_name_from_resource_id(image.id),
+            })
+        return image_list
+
+    def get_network_list(self, filter_dict={}):
+        """Obtain tenant networks of VIM
+        Filter_dict can be:
+            name: network name
+            id: network uuid
+            shared: boolean
+            tenant_id: tenant
+            admin_state_up: boolean
+            status: 'ACTIVE'
+        Returns the network list of dictionaries
+        """
+        self.logger.debug('Getting all subnets from VIM')
+        try:
+            self._reload_connection()
+            vnet = self.conn_vnet.virtual_networks.get(self.config["resource_group"], self.vnet_name)
+            subnet_list = []
+            
+            for subnet in vnet.subnets:
+                # TODO implement filter_dict
+                if filter_dict:
+                    if filter_dict.get("id") and str(subnet.id) != filter_dict["id"]:
+                        continue
+                    if filter_dict.get("name") and \
+                            self._get_resource_name_from_resource_id(subnet.id) != filter_dict["name"]:
+                        continue
+
+                subnet_list.append({
+                    'id': str(subnet.id),
+                     'name': self._get_resource_name_from_resource_id(subnet.id),
+                     'status': str(vnet.provisioning_state),  # TODO Does subnet contain status???
+                     'cidr_block': str(subnet.address_prefix)
+                    }
+                )
+            return subnet_list
+        except Exception as e:
+            self.format_vimconn_exception(e)
+
+    def new_vminstance(self, vm_name, description, start, image_id, flavor_id, net_list, cloud_config=None,
+                       disk_list=None, availability_zone_index=None, availability_zone_list=None):
+
+        return self._new_vminstance(vm_name, image_id, flavor_id, net_list)
+        
+    def _new_vminstance(self, vm_name, image_id, flavor_id, net_list, cloud_config=None, disk_list=None,
+                        availability_zone_index=None, availability_zone_list=None):
+        # Create an Azure VM: one NIC per entry of net_list, then the VM itself with the
+        # os/hardware/storage/network profiles, finally run a shell command inside it.
+        # NOTE(review): cloud_config, disk_list and the availability-zone parameters are
+        # accepted but not used yet -- confirm before relying on them.
+        #Create NICs
+        self._check_subnets_for_vm(net_list)
+        vm_nics = []
+        for idx, net in enumerate(net_list):
+            subnet_id=net['subnet_id']
+            nic_name = vm_name + '-nic-'+str(idx)
+            vm_nic = self._create_nic(subnet_id, nic_name)
+            vm_nics.append({ 'id': str(vm_nic.id)})
+
+        try:
+            vm_parameters = {
+                'location': self.region,
+                'os_profile': {
+                    'computer_name': vm_name,  # TODO if vm_name cannot be repeated add uuid4() suffix
+                    'admin_username': 'sergio',  # TODO is it mandatory???
+                    # NOTE(review): user name and authorized_keys path are hard-coded test
+                    # values; presumably they should be derived from cloud_config -- confirm.
+                    'linuxConfiguration': {
+                        'disablePasswordAuthentication': 'true',
+                        'ssh': {
+                          'publicKeys': [
+                            {
+                              'path': '/home/sergio/.ssh/authorized_keys',
+                              'keyData': self.pub_key
+                            }
+                          ]
+                        }
+                    }                    
+                    
+                },
+                'hardware_profile': {
+                    'vm_size':flavor_id
+                },
+                'storage_profile': {
+                    'image_reference': image_id
+                },
+                'network_profile': {
+                    # NOTE(review): only the first NIC is attached to the VM; additional
+                    # entries of net_list are created above but ignored here.
+                    'network_interfaces': [
+                        vm_nics[0]
+                    ]
+                }
+            }
+            # create_or_update is asynchronous: it returns a poller whose completion is
+            # not awaited here.
+            creation_result = self.conn_compute.virtual_machines.create_or_update(
+                self.resource_group, 
+                vm_name, 
+                vm_parameters
+            )
+            
+            run_command_parameters = {
+                'command_id': 'RunShellScript', # For linux, don't change it
+                'script': [
+                'date > /home/sergio/test.txt'
+                ]
+            }
+            poller = self.conn_compute.virtual_machines.run_command(
+                self.resource_group, 
+                vm_name, 
+                run_command_parameters
+            )
+            # TODO return a tuple (vm-ID, None)
+            # NOTE(review): nothing is returned on success, so callers currently get None
+            # instead of the (vm_id, created_items) tuple other connectors return.
+        except Exception as e:
+            self.format_vimconn_exception(e)
+
+    def get_flavor_id_from_data(self, flavor_dict):
+        self.logger.debug("Getting flavor id from data")
+        self._reload_connection()
+        vm_sizes_list = [vm_size.serialize() for vm_size in self.conn_compute.virtual_machine_sizes.list(self.region)]
+
+        cpus = flavor_dict['vcpus']
+        memMB = flavor_dict['ram']
+
+        filteredSizes = [size for size in vm_sizes_list if size['numberOfCores'] > cpus and size['memoryInMB'] > memMB]
+        listedFilteredSizes = sorted(filteredSizes, key=lambda k: k['numberOfCores'])
+
+        return listedFilteredSizes[0]['name']
+
+    def check_vim_connectivity(self):
+        try:
+            self._reload_connection()
+            return True
+        except Exception as e:
+            raise vimconn.vimconnException("Connectivity issue with Azure API: {}".format(e))
+
+    def get_network(self, net_id):
+        resGroup = self._get_resource_group_name_from_resource_id(net_id)
+        resName = self._get_resource_name_from_resource_id(net_id)
+        
+        self._reload_connection()
+        vnet = self.conn_vnet.virtual_networks.get(resGroup, resName)
+
+        return vnet
+
+    def delete_network(self, net_id):
+        resGroup = self._get_resource_group_name_from_resource_id(net_id)
+        resName = self._get_resource_name_from_resource_id(net_id)
+        
+        self._reload_connection()
+        self.conn_vnet.virtual_networks.delete(resGroup, resName)
+
+    def delete_vminstance(self, vm_id):
+        resGroup = self._get_resource_group_name_from_resource_id(net_id)
+        resName = self._get_resource_name_from_resource_id(net_id)
+        
+        self._reload_connection()
+        self.conn_compute.virtual_machines.delete(resGroup, resName)
+
+    def get_vminstance(self, vm_id):
+        resGroup = self._get_resource_group_name_from_resource_id(net_id)
+        resName = self._get_resource_name_from_resource_id(net_id)
+        
+        self._reload_connection()
+        vm=self.conn_compute.virtual_machines.get(resGroup, resName)
+
+        return vm
+
+    def get_flavor(self, flavor_id):
+        self._reload_connection()
+        for vm_size in self.conn_compute.virtual_machine_sizes.list(self.region):
+            if vm_size.name == flavor_id :
+                return vm_size
+
+
+# TODO refresh_nets_status ver estado activo
+# TODO refresh_vms_status  ver estado activo
+# TODO get_vminstance_console  for getting console
+
+if __name__ == "__main__":
+
+    # Making some basic test
+    # Manual smoke test: requires the AZURE_* environment variables listed in
+    # needed_test_params to be exported before running this module directly.
+    vim_id='azure'
+    vim_name='azure'
+    needed_test_params = {
+        "client_id": "AZURE_CLIENT_ID",
+        "secret": "AZURE_SECRET",
+        "tenant": "AZURE_TENANT",
+        "resource_group": "AZURE_RESOURCE_GROUP",
+        "subscription_id": "AZURE_SUBSCRIPTION_ID",
+        "vnet_name": "AZURE_VNET_NAME",
+    }
+    test_params = {}
+
+    # Fail fast with a clear message if any required variable is missing
+    for param, env_var in needed_test_params.items():
+        value = getenv(env_var)
+        if not value:
+            raise Exception("Provide a valid value for env '{}'".format(env_var))
+        test_params[param] = value
+
+    # NOTE(review): resource_group/subscription_id are re-read from the environment
+    # here even though they were already validated into test_params above.
+    config = {
+            'region_name': getenv("AZURE_REGION_NAME", 'westeurope'),
+            'resource_group': getenv("AZURE_RESOURCE_GROUP"),
+            'subscription_id': getenv("AZURE_SUBSCRIPTION_ID"),
+            'pub_key': getenv("AZURE_PUB_KEY", None),
+            'vnet_name': getenv("AZURE_VNET_NAME", 'myNetwork'),
+    }
+
+    # Example VM request for the commented-out new_vminstance call below (unused otherwise)
+    virtualMachine = {
+        'name': 'sergio',
+        'description': 'new VM',
+        'status': 'running',
+        'image': {
+            'publisher': 'Canonical',
+            'offer': 'UbuntuServer',
+            'sku': '16.04.0-LTS',
+            'version': 'latest'
+        },
+        'hardware_profile': {
+            'vm_size': 'Standard_DS1_v2'
+        },
+        'networks': [
+            'sergio'
+        ]
+    }
+
+    # Example network profile (currently unused by the calls below)
+    vnet_config = {
+        'subnet_address': '10.1.2.0/24',
+        #'subnet_name': 'subnet-oam'
+    }
+    ###########################
+
+    azure = vimconnector(vim_id, vim_name, tenant_id=test_params["tenant"], tenant_name=None, url=None, url_admin=None,
+                         user=test_params["client_id"], passwd=test_params["secret"], log_level=None, config=config)
+
+    # azure.get_flavor_id_from_data("here")
+    # subnets=azure.get_network_list()
+    # azure.new_vminstance(virtualMachine['name'], virtualMachine['description'], virtualMachine['status'],
+    #                      virtualMachine['image'], virtualMachine['hardware_profile']['vm_size'], subnets)
+
+    azure.get_flavor("Standard_A11")
diff --git a/RO-VIM-azure/requirements.txt b/RO-VIM-azure/requirements.txt
new file mode 100644 (file)
index 0000000..920d03a
--- /dev/null
@@ -0,0 +1,20 @@
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+PyYAML
+requests
+netaddr
+azure
+
diff --git a/RO-VIM-azure/setup.py b/RO-VIM-azure/setup.py
new file mode 100644 (file)
index 0000000..557feda
--- /dev/null
@@ -0,0 +1,53 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from setuptools import setup
+
+_name = "osm_rovim_azure"
+
+README = """
+===========
+osm-rovim_azure
+===========
+
+osm-ro pluging for azure VIM
+"""
+
+setup(
+    name=_name,
+    description='OSM ro vim plugin for azure',
+    long_description=README,
+    version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'),
+    # version=VERSION,
+    # python_requires='>3.5.0',
+    author='ETSI OSM',
+    author_email='alfonso.tiernosepulveda@telefonica.com',
+    maintainer='Alfonso Tierno',
+    maintainer_email='alfonso.tiernosepulveda@telefonica.com',
+    url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary',
+    license='Apache 2.0',
+
+    packages=[_name],
+    include_package_data=True,
+    dependency_links=["git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro"],
+    install_requires=["requests", "netaddr", "PyYAML", "azure", "osm-ro"],
+    setup_requires=['setuptools-version-command'],
+    entry_points={
+        'osm_rovim.plugins': ['rovim_azure = osm_rovim_azure.vimconn_azure'],
+    },
+)
diff --git a/RO-VIM-azure/stdeb.cfg b/RO-VIM-azure/stdeb.cfg
new file mode 100644 (file)
index 0000000..968c55e
--- /dev/null
@@ -0,0 +1,19 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+[DEFAULT]
+X-Python3-Version : >= 3.5
+Depends3: python3-requests, python3-netaddr, python3-yaml, python3-osm-ro, python3-pip
+
diff --git a/RO-VIM-azure/tox.ini b/RO-VIM-azure/tox.ini
new file mode 100644 (file)
index 0000000..9bc1472
--- /dev/null
@@ -0,0 +1,41 @@
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+[tox]
+envlist = py3
+toxworkdir={homedir}/.tox
+
+[testenv]
+basepython = python3
+install_command = python3 -m pip install -r requirements.txt -U {opts} {packages}
+# deps = -r{toxinidir}/test-requirements.txt
+commands=python3 -m unittest discover -v
+
+[testenv:flake8]
+basepython = python3
+deps = flake8
+commands = flake8 osm_rovim_azure --max-line-length 120 \
+    --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504
+
+[testenv:unittest]
+basepython = python3
+commands = python3 -m unittest osm_rovim_azure.tests
+
+[testenv:build]
+basepython = python3
+deps = stdeb
+       setuptools-version-command
+commands = python3 setup.py --command-packages=stdeb.command bdist_deb
+
diff --git a/RO-VIM-fos/Makefile b/RO-VIM-fos/Makefile
new file mode 100644 (file)
index 0000000..2693453
--- /dev/null
@@ -0,0 +1,24 @@
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+all: clean package
+
+clean:
+       rm -rf dist deb_dist osm_rovim_fos-*.tar.gz osm_rovim_fos.egg-info .eggs
+
+package:
+       python3 setup.py --command-packages=stdeb.command sdist_dsc
+       cp debian/python3-osm-rovim-fos.postinst deb_dist/osm-rovim-fos*/debian/
+       cd deb_dist/osm-rovim-fos*/ && dpkg-buildpackage -rfakeroot -uc -us
diff --git a/RO-VIM-fos/debian/python3-osm-rovim-fos.postinst b/RO-VIM-fos/debian/python3-osm-rovim-fos.postinst
new file mode 100755 (executable)
index 0000000..744b26f
--- /dev/null
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: OSM_TECH@list.etsi.org
+##
+
+echo "POST INSTALL OSM-ROVIM-FOS"
+
+# Pip packages required for the fog05 (fos) connector
+python3 -m pip install fog05rest>=0.0.4
+
diff --git a/RO-VIM-fos/osm_rovim_fos/vimconn_fos.py b/RO-VIM-fos/osm_rovim_fos/vimconn_fos.py
new file mode 100644 (file)
index 0000000..90b0e7e
--- /dev/null
@@ -0,0 +1,879 @@
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2019 ADLINK Technology Inc..
+# This file is part of ETSI OSM
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+#
+
+"""
+Eclipse fog05 connector, implements methods to interact with fog05 using REST Client + REST Proxy
+
+Manages LXD containers on x86_64 by default, currently missing EPA and VF/PF
+Support config dict:
+    - arch : cpu architecture for the VIM
+    - hypervisor: virtualization technology supported by the VIM; can be
+                one of: LXD, KVM, BARE, XEN, DOCKER, MCU.
+                The selected VIM needs to have at least one node with support
+                for the selected hypervisor.
+
+"""
+__author__="Gabriele Baldoni"
+__date__ ="$13-may-2019 10:35:12$"
+
+import uuid
+import socket
+import struct
+from . import vimconn
+import random
+import yaml
+from functools import partial
+from fog05rest import FIMAPI
+from fog05rest import fimerrors
+
+
+class vimconnector(vimconn.vimconnector):
+    def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None, log_level=None,
+                 config={}, persistent_info={}):
+        """Constructor of VIM
+        Params:
+            'uuid': id asigned to this VIM
+            'name': name assigned to this VIM, can be used for logging
+            'tenant_id', 'tenant_name': (only one of them is mandatory) VIM tenant to be used
+            'url_admin': (optional), url used for administrative tasks
+            'user', 'passwd': credentials of the VIM user
+            'log_level': provider if it should use a different log_level than the general one
+            'config': dictionary with extra VIM information. This contains a consolidate version of general VIM config
+                    at creation and particular VIM config at teh attachment
+            'persistent_info': dict where the class can store information that will be available among class
+                    destroy/creation cycles. This info is unique per VIM/credential. At first call it will contain an
+                    empty dict. Useful to store login/tokens information for speed up communication
+
+        Returns: Raise an exception is some needed parameter is missing, but it must not do any connectivity
+            check against the VIM
+        """
+
+        vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level,
+                                      config, persistent_info)
+
+        self.logger.debug('vimconn_fos init with config: {}'.format(config))
+        self.arch = config.get('arch', 'x86_64')
+        self.hv = config.get('hypervisor', 'LXD')
+        self.nodes = config.get('nodes', [])
+        self.fdu_node_map = {}
+        self.fos_api = FIMAPI(locator=self.url)
+
+
+    def __get_ip_range(self, first, count):
+        int_first = struct.unpack('!L', socket.inet_aton(first))[0]
+        int_last = int_first + count
+        last = socket.inet_ntoa(struct.pack('!L', int_last))
+        return (first, last)
+
+    def __name_filter(self, desc, filter_name=None):
+        if filter_name is None:
+            return True
+        return desc.get('name') == filter_name
+
+    def __id_filter(self, desc, filter_id=None):
+        if filter_id is None:
+            return True
+        return desc.get('uuid') == filter_id
+
+    def __checksum_filter(self, desc, filter_checksum=None):
+        if filter_checksum is None:
+            return True
+        return desc.get('checksum') == filter_checksum
+
+    def check_vim_connectivity(self):
+        """Checks VIM can be reached and user credentials are ok.
+        Returns None if success or raised vimconnConnectionException, vimconnAuthException, ...
+        """
+        try:
+            self.fos_api.check()
+            return None
+        except fimerrors.FIMAuthExcetpion as fae:
+            raise vimconn.vimconnAuthException("Unable to authenticate to the VIM. Error {}".format(fae))
+        except Exception as e:
+            raise vimconn.vimconnConnectionException("VIM not reachable. Error {}".format(e))
+
+    def new_network(self, net_name, net_type, ip_profile=None, shared=False, vlan=None):
+        """Adds a tenant network to VIM
+        Params:
+            'net_name': name of the network
+            'net_type': one of:
+                'bridge': overlay isolated network
+                'data':   underlay E-LAN network for Passthrough and SRIOV interfaces
+                'ptp':    underlay E-LINE network for Passthrough and SRIOV interfaces.
+            'ip_profile': is a dict containing the IP parameters of the network
+                'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
+                'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
+                'gateway_address': (Optional) ip_schema, that is X.X.X.X
+                'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
+                'dhcp_enabled': True or False
+                'dhcp_start_address': ip_schema, first IP to grant
+                'dhcp_count': number of IPs to grant.
+            'shared': if this network can be seen/use by other tenants/organization
+            'vlan': in case of a data or ptp net_type, the intended vlan tag to be used for the network
+        Returns the network identifier on success or raises and exception on failure
+        """
+        self.logger.debug('new_network: {}'.format(locals()))
+        # Only overlay (ELAN) networks are supported by this connector
+        if net_type in ['data','ptp']:
+            raise vimconn.vimconnNotImplemented('{} type of network not supported'.format(net_type))
+
+        net_uuid = '{}'.format(uuid.uuid4())
+        desc = {
+            'uuid':net_uuid,
+            'name':net_name,
+            'net_type':'ELAN',
+            'is_mgmt':False
+            }
+
+        if ip_profile is not None:
+            ip = {}
+            if ip_profile.get('ip_version') == 'IPv4':
+                ip_info = {}
+                # DHCP range is derived from the start address plus the requested count
+                ip_range = self.__get_ip_range(ip_profile.get('dhcp_start_address'), ip_profile.get('dhcp_count'))
+                dhcp_range = '{},{}'.format(ip_range[0],ip_range[1])
+                ip.update({'subnet':ip_profile.get('subnet_address')})
+                # NOTE(review): reads key 'dns' although the documented ip_profile key is
+                # 'dns_address' -- confirm which key callers actually send.
+                ip.update({'dns':ip_profile.get('dns', None)})
+                ip.update({'dhcp_enable':ip_profile.get('dhcp_enabled', False)})
+                ip.update({'dhcp_range': dhcp_range})
+                ip.update({'gateway':ip_profile.get('gateway_address', None)})
+                # NOTE(review): ip_info is still an empty dict here and this update is
+                # immediately superseded by the 'ip' update below -- looks like leftover code.
+                desc.update({'ip_configuration':ip_info})
+            else:
+                raise vimconn.vimconnNotImplemented('IPV6 network is not implemented at VIM')
+            desc.update({'ip_configuration':ip})
+        self.logger.debug('VIM new_network args: {} - Generated Eclipse fog05 Descriptor {}'.format(locals(), desc))
+        try:
+            self.fos_api.network.add_network(desc)
+        except fimerrors.FIMAResouceExistingException as free:
+            raise vimconn.vimconnConflictException("Network already exists at VIM. Error {}".format(free))
+        except Exception as e:
+            raise vimconn.vimconnException("Unable to create network {}. Error {}".format(net_name, e))
+            # No way from the current rest service to get the actual error, most likely it will be an already existing error
+        return net_uuid
+
+    def get_network_list(self, filter_dict={}):
+        """Obtain tenant networks of VIM
+        Params:
+            'filter_dict' (optional) contains entries to return only networks that matches ALL entries:
+                name: string  => returns only networks with this name
+                id:   string  => returns networks with this VIM id, this imply returns one network at most
+                shared: boolean >= returns only networks that are (or are not) shared
+                tenant_id: sting => returns only networks that belong to this tenant/project
+                ,#(not used yet) admin_state_up: boolean => returns only networks that are (or are not) in admin state active
+                #(not used yet) status: 'ACTIVE','ERROR',... => filter networks that are on this status
+        Returns the network list of dictionaries. each dictionary contains:
+            'id': (mandatory) VIM network id
+            'name': (mandatory) VIM network name
+            'status': (mandatory) can be 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
+            'network_type': (optional) can be 'vxlan', 'vlan' or 'flat'
+            'segmentation_id': (optional) in case network_type is vlan or vxlan this field contains the segmentation id
+            'error_msg': (optional) text that explains the ERROR status
+            other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
+        List can be empty if no network map the filter_dict. Raise an exception only upon VIM connectivity,
+            authorization, or some other unspecific error
+        """
+        self.logger.debug('get_network_list: {}'.format(filter_dict))
+        res = []
+        try:
+            nets = self.fos_api.network.list()
+        except Exception as e:
+            raise vimconn.vimconnConnectionException("Cannot get network list from VIM, connection error. Error {}".format(e))
+
+        filters = [
+            partial(self.__name_filter, filter_name=filter_dict.get('name')),
+            partial(self.__id_filter,filter_id=filter_dict.get('id'))
+        ]
+
+        r1 = []
+
+        for n in nets:
+            match = True
+            for f in filters:
+                match = match and f(n)
+            if match:
+                r1.append(n)
+
+        for n in r1:
+            osm_net = {
+                'id':n.get('uuid'),
+                'name':n.get('name'),
+                'status':'ACTIVE'
+            }
+            res.append(osm_net)
+        return res
+
+    def get_network(self, net_id):
+        """Obtain network details from the 'net_id' VIM network
+        Return a dict that contains:
+            'id': (mandatory) VIM network id, that is, net_id
+            'name': (mandatory) VIM network name
+            'status': (mandatory) can be 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
+            'error_msg': (optional) text that explains the ERROR status
+            other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
+        Raises an exception upon error or when network is not found
+        """
+        self.logger.debug('get_network: {}'.format(net_id))
+        res = self.get_network_list(filter_dict={'id':net_id})
+        if len(res) == 0:
+            raise vimconn.vimconnNotFoundException("Network {} not found at VIM".format(net_id))
+        return res[0]
+
+    def delete_network(self, net_id):
+        """Deletes a tenant network from VIM
+        Returns the network identifier or raises an exception upon error or when network is not found
+        """
+        self.logger.debug('delete_network: {}'.format(net_id))
+        try:
+            self.fos_api.network.remove_network(net_id)
+        except fimerrors.FIMNotFoundException as fnfe:
+            raise vimconn.vimconnNotFoundException("Network {} not found at VIM (already deleted?). Error {}".format(net_id, fnfe))
+        except Exception as e:
+            raise vimconn.vimconnException("Cannot delete network {} from VIM. Error {}".format(net_id, e))
+        return net_id
+
+    def refresh_nets_status(self, net_list):
+        """Get the status of the networks
+        Params:
+            'net_list': a list with the VIM network id to be get the status
+        Returns a dictionary with:
+            'net_id':         #VIM id of this network
+                status:     #Mandatory. Text with one of:
+                    #  DELETED (not found at vim)
+                    #  VIM_ERROR (Cannot connect to VIM, authentication problems, VIM response error, ...)
+                    #  OTHER (Vim reported other status not understood)
+                    #  ERROR (VIM indicates an ERROR status)
+                    #  ACTIVE, INACTIVE, DOWN (admin down),
+                    #  BUILD (on building process)
+                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
+                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
+            'net_id2': ...
+        """
+        self.logger.debug('Refeshing network status with args: {}'.format(locals()))
+        r = {}
+        for n in net_list:
+            try:
+                osm_n = self.get_network(n)
+                r.update({
+                    osm_n.get('id'):{'status':osm_n.get('status')}
+                })
+            except vimconn.vimconnNotFoundException:
+                # NOTE(review): the docstring above says a net missing at the VIM should be
+                # reported as DELETED, but VIM_ERROR is returned here -- confirm the mapping.
+                r.update({
+                    n:{'status':'VIM_ERROR'}
+                })
+        return r
+
+    def get_flavor(self, flavor_id):
+        """Obtain flavor details from the VIM
+        Returns the flavor dict details {'id':<>, 'name':<>, other vim specific }
+        Raises an exception upon error or if not found
+        """
+        self.logger.debug('VIM get_flavor with args: {}'.format(locals()))
+        try:
+            r = self.fos_api.flavor.get(flavor_id)
+        except Exception as e:
+            raise vimconn.vimconnConnectionException("VIM not reachable. Error {}".format(e))
+        if r is None:
+            raise vimconn.vimconnNotFoundException("Flavor not found at VIM")
+        return {'id':r.get('uuid'), 'name':r.get('name'), 'fos':r}
+
+    def get_flavor_id_from_data(self, flavor_dict):
+        """Obtain flavor id that match the flavor description
+        Params:
+            'flavor_dict': dictionary that contains:
+                'disk': main hard disk in GB
+                'ram': meomry in MB
+                'vcpus': number of virtual cpus
+                #TODO: complete parameters for EPA
+        Returns the flavor_id or raises a vimconnNotFoundException
+        """
+        self.logger.debug('VIM get_flavor_id_from_data with args : {}'.format(locals()))
+
+        try:
+            flvs = self.fos_api.flavor.list()
+        except Exception as e:
+            raise vimconn.vimconnConnectionException("VIM not reachable. Error {}".format(e))
+        r = [x.get('uuid') for x in flvs if (x.get('cpu_min_count') == flavor_dict.get('vcpus') and x.get('ram_size_mb') == flavor_dict.get('ram') and x.get('storage_size_gb') == flavor_dict.get('disk'))]
+        if len(r) == 0:
+            raise vimconn.vimconnNotFoundException ( "No flavor found" )
+        return r[0]
+
+    def new_flavor(self, flavor_data):
+        """Adds a tenant flavor to VIM
+            flavor_data contains a dictionary with information, keys:
+                name: flavor name
+                ram: memory (cloud type) in MBytes
+                vpcus: cpus (cloud type)
+                extended: EPA parameters
+                  - numas: #items requested in same NUMA
+                        memory: number of 1G huge pages memory
+                        paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads
+                        interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
+                          - name: interface name
+                            dedicated: yes|no|yes:sriov;  for PT, SRIOV or only one SRIOV for the physical NIC
+                            bandwidth: X Gbps; requested guarantee bandwidth
+                            vpci: requested virtual PCI address
+                disk: disk size
+                is_public:
+                 #TODO to concrete
+        Returns the flavor identifier"""
+        self.logger.debug('VIM new_flavor with args: {}'.format(locals()))
+        flv_id = '{}'.format(uuid.uuid4())
+        desc = {
+            'uuid':flv_id,
+            'name':flavor_data.get('name'),
+            'cpu_arch': self.arch,
+            'cpu_min_count': flavor_data.get('vcpus'),
+            'cpu_min_freq': 0.0,
+            'ram_size_mb':float(flavor_data.get('ram')),
+            'storage_size_gb':float(flavor_data.get('disk'))
+        }
+        try:
+            self.fos_api.flavor.add(desc)
+        except fimerrors.FIMAResouceExistingException as free:
+            raise vimconn.vimconnConflictException("Flavor {} already exist at VIM. Error {}".format(flv_id, free))
+        except Exception as e:
+            raise vimconn.vimconnConnectionException("VIM not reachable. Error {}".format(e))
+        return flv_id
+
+
+    def delete_flavor(self, flavor_id):
+        """Deletes a tenant flavor from VIM identify by its id
+        Returns the used id or raise an exception"""
+        try:
+            self.fos_api.flavor.remove(flavor_id)
+        except fimerrors.FIMNotFoundException as fnfe:
+            raise vimconn.vimconnNotFoundException("Flavor {} not found at VIM (already deleted?). Error {}".format(flavor_id, fnfe))
+        except Exception as e:
+            raise vimconn.vimconnConnectionException("VIM not reachable. Error {}".format(e))
+        return flavor_id
+
+    def new_image(self, image_dict):
+        """ Adds a tenant image to VIM. imge_dict is a dictionary with:
+            name: name
+            disk_format: qcow2, vhd, vmdk, raw (by default), ...
+            location: path or URI
+            public: "yes" or "no"
+            metadata: metadata of the image
+        Returns the image id or raises an exception if failed
+        """
+        self.logger.debug('VIM new_image with args: {}'.format(locals()))
+        img_id = '{}'.format(uuid.uuid4())
+        desc = {
+            'name':image_dict.get('name'),
+            'uuid':img_id,
+            'uri':image_dict.get('location')
+        }
+        try:
+            self.fos_api.image.add(desc)
+        except fimerrors.FIMAResouceExistingException as free:
+            raise vimconn.vimconnConflictException("Image {} already exist at VIM. Error {}".format(img_id, free))
+        except Exception as e:
+            raise vimconn.vimconnConnectionException("VIM not reachable. Error {}".format(e))
+        return img_id
+
+    def get_image_id_from_path(self, path):
+
+        """Get the image id from image path in the VIM database.
+           Returns the image_id or raises a vimconnNotFoundException
+        """
+        self.logger.debug('VIM get_image_id_from_path with args: {}'.format(locals()))
+        try:
+            imgs = self.fos_api.image.list()
+        except Exception as e:
+            raise vimconn.vimconnConnectionException("VIM not reachable. Error {}".format(e))
+        res = [x.get('uuid') for x in imgs if x.get('uri')==path]
+        if len(res) == 0:
+            raise vimconn.vimconnNotFoundException("Image with this path was not found")
+        return res[0]
+
+    def get_image_list(self, filter_dict={}):
+        """Obtain tenant images from VIM
+        Filter_dict can be:
+            name: image name
+            id: image uuid
+            checksum: image checksum
+            location: image path
+        Returns the image list of dictionaries:
+            [{<the fields at Filter_dict plus some VIM specific>}, ...]
+            List can be empty
+        """
+        self.logger.debug('VIM get_image_list args: {}'.format(locals()))
+        r = []
+        try:
+            fimgs = self.fos_api.image.list()
+        except Exception as e:
+            raise vimconn.vimconnConnectionException("VIM not reachable. Error {}".format(e))
+
+        filters = [
+            partial(self.__name_filter, filter_name=filter_dict.get('name')),
+            partial(self.__id_filter,filter_id=filter_dict.get('id')),
+            partial(self.__checksum_filter,filter_checksum=filter_dict.get('checksum'))
+        ]
+
+        r1 = []
+
+        for i in fimgs:
+            match = True
+            for f in filters:
+                match = match and f(i)
+            if match:
+                r1.append(i)
+
+        for i in r1:
+            img_info = {
+                'name':i.get('name'),
+                'id':i.get('uuid'),
+                'checksum':i.get('checksum'),
+                'location':i.get('uri'),
+                'fos':i
+            }
+            r.append(img_info)
+        return r
+        #raise vimconnNotImplemented( "Should have implemented this" )
+
+    def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None, disk_list=None,
+        availability_zone_index=None, availability_zone_list=None):
+        """Adds a VM instance to VIM
+        Params:
+            'start': (boolean) indicates if VM must start or created in pause mode.
+            'image_id','flavor_id': image and flavor VIM id to use for the VM
+            'net_list': list of interfaces, each one is a dictionary with:
+                'name': (optional) name for the interface.
+                'net_id': VIM network id where this interface must be connect to. Mandatory for type==virtual
+                'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities
+                'model': (optional and only have sense for type==virtual) interface model: virtio, e1000, ...
+                'mac_address': (optional) mac address to assign to this interface
+                'ip_address': (optional) IP address to assign to this interface
+                #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type if VF and net_id is not provided,
+                    the VLAN tag to be used. In case net_id is provided, the internal network vlan is used for tagging VF
+                'type': (mandatory) can be one of:
+                    'virtual', in this case always connected to a network of type 'net_type=bridge'
+                     'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp network ot it
+                           can created unconnected
+                     'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
+                     'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
+                            are allocated on the same physical NIC
+                'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
+                'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing
+                                or True, it must apply the default VIM behaviour
+                After execution the method will add the key:
+                'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this
+                        interface. 'net_list' is modified
+            'cloud_config': (optional) dictionary with:
+                'key-pairs': (optional) list of strings with the public key to be inserted to the default user
+                'users': (optional) list of users to be inserted, each item is a dict with:
+                    'name': (mandatory) user name,
+                    'key-pairs': (optional) list of strings with the public key to be inserted to the user
+                'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
+                    or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
+                'config-files': (optional). List of files to be transferred. Each item is a dict with:
+                    'dest': (mandatory) string with the destination absolute path
+                    'encoding': (optional, by default text). Can be one of:
+                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
+                    'content' (mandatory): string with the content of the file
+                    'permissions': (optional) string with file permissions, typically octal notation '0644'
+                    'owner': (optional) file owner, string with the format 'owner:group'
+                'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
+            'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
+                'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
+                'size': (mandatory) string with the size of the disk in GB
+            availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
+            availability_zone_list: list of availability zones given by user in the VNFD descriptor.  Ignore if
+                availability_zone_index is None
+        Returns a tuple with the instance identifier and created_items or raises an exception on error
+            created_items can be None or a dictionary where this method can include key-values that will be passed to
+            the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
+            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+            as not present.
+        """
+        self.logger.debug('new_vminstance with rgs: {}'.format(locals()))
+        # the FDU uuid is generated client-side and later onboarded under this id
+        fdu_uuid = '{}'.format(uuid.uuid4())
+
+        # resolve flavor and image descriptors; both are embedded verbatim in the FDU
+        flv = self.fos_api.flavor.get(flavor_id)
+        img = self.fos_api.image.get(image_id)
+
+        if flv is None:
+            raise vimconn.vimconnNotFoundException("Flavor {} not found at VIM".format(flavor_id))
+        if img is None:
+            raise vimconn.vimconnNotFoundException("Image {} not found at VIM".format(image_id))
+
+        # bookkeeping returned to OSM and consumed later by delete_vminstance
+        created_items = {
+            'fdu_id':'',
+            'node_id':'',
+            'connection_points':[]
+            }
+
+        # fog05 FDU (FIM Deployable Unit) descriptor skeleton; interfaces and
+        # connection points are filled below from net_list
+        fdu_desc = {
+            'name':name,
+            'uuid':fdu_uuid,
+            'computation_requirements':flv,
+            'image':img,
+            'hypervisor':self.hv,
+            'migration_kind':'LIVE',
+            'interfaces':[],
+            'io_ports':[],
+            'connection_points':[],
+            'depends_on':[]
+        }
+
+        # one connection point + one virtual interface per requested network;
+        # the cp uuid is written back into net_list as the OSM-visible 'vim_id'
+        nets = []
+        cps = []
+        intf_id = 0
+        for n in net_list:
+            cp_id = '{}'.format(uuid.uuid4())
+            n.update({'vim_id':cp_id})
+            pair_id = n.get('net_id')
+
+            cp_d = {
+                'uuid':cp_id,
+                'pair_id':pair_id
+            }
+            intf_d = {
+                'name':n.get('name','eth{}'.format(intf_id)),
+                'is_mgmt':False,
+                'if_type':'INTERNAL',
+                'virtual_interface':{
+                    'intf_type':n.get('model','VIRTIO'),
+                    'vpci':n.get('vpci','0:0:0'),
+                    'bandwidth':int(n.get('bw', 100))
+                }
+            }
+            if n.get('mac_address', None) is not None:
+                intf_d['mac_address'] = n['mac_address']
+
+            created_items['connection_points'].append(cp_id)
+            fdu_desc['connection_points'].append(cp_d)
+            fdu_desc['interfaces'].append(intf_d)
+
+            intf_id = intf_id + 1
+
+        # map OSM cloud-init data (user-data script, ssh keys) to fog05 configuration
+        if cloud_config is not None:
+            configuration = {
+                    'conf_type':'CLOUD_INIT'
+                }
+            if cloud_config.get('user-data') is not None:
+                configuration.update({'script':cloud_config.get('user-data')})
+            if cloud_config.get('key-pairs') is not None:
+                configuration.update({'ssh_keys':cloud_config.get('key-pairs')})
+
+            # NOTE(review): the configuration is attached only when a 'script' is
+            # present, so ssh_keys alone are dropped — confirm this is intended
+            if 'script' in configuration:
+                fdu_desc.update({'configuration':configuration})
+
+        ### NODE Selection ###
+        # Infrastructure info
+        #   nodes dict with
+        #        uuid -> node uuid
+        #        computational capabilities -> cpu, ram, and disk available
+        #        hypervisors -> list of available hypervisors (eg. KVM, LXD, BARE)
+        #
+        #
+
+        # UPDATING AVAILABLE INFRASTRUCTURE
+
+        # an explicit node list in the VIM config restricts the candidate set;
+        # otherwise every node currently known to the FIM is considered
+        if len(self.nodes) == 0:
+            nodes_id = self.fos_api.node.list()
+        else:
+            nodes_id = self.nodes
+        nodes = []
+        for n in nodes_id:
+            n_info = self.fos_api.node.info(n)
+            if n_info is None:
+                continue
+            n_plugs = []
+            for p in self.fos_api.node.plugins(n):
+                n_plugs.append(self.fos_api.plugin.info(n,p))
+
+            n_cpu_number =  len(n_info.get('cpu'))
+            n_cpu_arch = n_info.get('cpu')[0].get('arch')
+            n_cpu_freq = n_info.get('cpu')[0].get('frequency')
+            n_ram = n_info.get('ram').get('size')
+            # assumes the usable disk is the largest 'sda*' device — TODO confirm
+            n_disk_size = sorted(list(filter(lambda x: 'sda' in x['local_address'], n_info.get('disks'))), key= lambda k: k['dimension'])[-1].get('dimension')
+
+            # 'runtime' plugins identify which hypervisors the node can run
+            hvs = []
+            for p in n_plugs:
+                if p.get('type') == 'runtime':
+                    hvs.append(p.get('name'))
+
+            ni = {
+                'uuid':n,
+                'computational_capabilities':{
+                    'cpu_count':n_cpu_number,
+                    'cpu_arch':n_cpu_arch,
+                    'cpu_freq':n_cpu_freq,
+                    'ram_size':n_ram,
+                    'disk_size':n_disk_size
+                },
+                'hypervisors':hvs
+            }
+            nodes.append(ni)
+
+        # NODE SELECTION
+        # a node is compatible when it offers the required hypervisor, matches the
+        # cpu architecture and satisfies cpu/ram/disk minimums from the flavor
+        compatible_nodes = []
+        for n in nodes:
+            if fdu_desc.get('hypervisor') in n.get('hypervisors'):
+                n_comp = n.get('computational_capabilities')
+                f_comp = fdu_desc.get('computation_requirements')
+                if f_comp.get('cpu_arch') == n_comp.get('cpu_arch'):
+                    if f_comp.get('cpu_min_count') <= n_comp.get('cpu_count') and f_comp.get('ram_size_mb') <= n_comp.get('ram_size'):
+                        if f_comp.get('disk_size_gb') <= n_comp.get('disk_size'):
+                            compatible_nodes.append(n)
+
+        if len(compatible_nodes) == 0:
+            raise vimconn.vimconnConflictException("No available nodes at VIM")
+        # naive scheduler: pick any compatible node at random
+        selected_node = random.choice(compatible_nodes)
+
+        created_items.update({'fdu_id':fdu_uuid, 'node_id': selected_node.get('uuid')})
+
+        self.logger.debug('FOS Node {} FDU Descriptor: {}'.format(selected_node.get('uuid'), fdu_desc))
+
+        try:
+            # two-step deployment: register the descriptor, then instantiate it
+            # on the selected node; the instance id (not the FDU id) is returned
+            self.fos_api.fdu.onboard(fdu_desc)
+            instanceid = self.fos_api.fdu.instantiate(fdu_uuid, selected_node.get('uuid'))
+            created_items.update({'instance_id':instanceid})
+
+            # remember which node hosts this instance for later status queries
+            self.fdu_node_map.update({instanceid: selected_node.get('uuid')})
+            self.logger.debug('new_vminstance return: {}'.format((fdu_uuid, created_items)))
+            return (instanceid, created_items)
+        except fimerrors.FIMAResouceExistingException as free:
+            raise vimconn.vimconnConflictException("VM already exists at VIM. Error {}".format(free))
+        except Exception as e:
+            raise vimconn.vimconnException("Error while instantiating VM {}. Error {}".format(name, e))
+
+
+    def get_vminstance(self,vm_id):
+        """Returns the VM instance information from VIM"""
+        self.logger.debug('VIM get_vminstance with args: {}'.format(locals()))
+
+        try:
+            intsinfo = self.fos_api.fdu.instance_info(vm_id)
+        except Exception as e:
+            raise vimconn.vimconnConnectionException("VIM not reachable. Error {}".format(e))
+        if intsinfo is None:
+            raise vimconn.vimconnNotFoundException('VM with id {} not found!'.format(vm_id))
+        return intsinfo
+
+
+    def delete_vminstance(self, vm_id, created_items=None):
+        """
+        Removes a VM instance from VIM and each associate elements
+        :param vm_id: VIM identifier of the VM, provided by method new_vminstance
+        :param created_items: dictionary with extra items to be deleted. provided by method new_vminstance and/or method
+            action_vminstance
+        :return: None or the same vm_id. Raises an exception on fail
+        """
+        self.logger.debug('FOS delete_vminstance with args: {}'.format(locals()))
+        fduid =  created_items.get('fdu_id')
+        try:
+            self.fos_api.fdu.terminate(vm_id)
+            self.fos_api.fdu.offload(fduid)
+        except Exception as e:
+            raise vimconn.vimconnException("Error on deletting VM with id {}. Error {}".format(vm_id,e))
+        return vm_id
+
+        #raise vimconnNotImplemented( "Should have implemented this" )
+
+    def refresh_vms_status(self, vm_list):
+        """Get the status of the virtual machines and their interfaces/ports
+           Params: the list of VM identifiers
+           Returns a dictionary with:
+                vm_id:          #VIM id of this Virtual Machine
+                    status:     #Mandatory. Text with one of:
+                                #  DELETED (not found at vim)
+                                #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+                                #  OTHER (Vim reported other status not understood)
+                                #  ERROR (VIM indicates an ERROR status)
+                                #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
+                                #  BUILD (on building process), ERROR
+                                #  ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
+                                #
+                    error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
+                    vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
+                    interfaces: list with interface info. Each item a dictionary with:
+                        vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
+                        mac_address:      #Text format XX:XX:XX:XX:XX:XX
+                        vim_net_id:       #network id where this interface is connected, if provided at creation
+                        vim_interface_id: #interface/port VIM id
+                        ip_address:       #null, or text with IPv4, IPv6 address
+                        compute_node:     #identification of compute node where PF,VF interface is allocated
+                        pci:              #PCI address of the NIC that hosts the PF,VF
+                        vlan:             #physical VLAN used for VF
+        """
+        self.logger.debug('FOS refresh_vms_status with args: {}'.format(locals()))
+        fos2osm_status = {
+            'DEFINE':'OTHER',
+            'CONFIGURE':'INACTIVE',
+            'RUN':'ACTIVE',
+            'PAUSE':'PAUSED',
+            'ERROR':'ERROR'
+        }
+
+        r = {}
+
+        for vm in vm_list:
+            self.logger.debug('FOS refresh_vms_status for {}'.format(vm))
+
+            info = {}
+            nid = self.fdu_node_map.get(vm)
+            if nid is None:
+                r.update({vm:{
+                    'status':'VIM_ERROR',
+                    'error_msg':'Not compute node associated for VM'
+                }})
+                continue
+
+            try:
+                vm_info = self.fos_api.fdu.instance_info(vm)
+            except:
+                r.update({vm:{
+                    'status':'VIM_ERROR',
+                    'error_msg':'unable to connect to VIM'
+                }})
+                continue
+
+            if vm_info is None:
+                r.update({vm:{'status':'DELETED'}})
+                continue
+
+
+            desc = self.fos_api.fdu.info(vm_info['fdu_uuid'])
+            osm_status = fos2osm_status.get(vm_info.get('status'))
+
+            self.logger.debug('FOS status info {}'.format(vm_info))
+            self.logger.debug('FOS status is {} <-> OSM Status {}'.format(vm_info.get('status'), osm_status))
+            info.update({'status':osm_status})
+            if vm_info.get('status') == 'ERROR':
+                info.update({'error_msg':vm_info.get('error_code')})
+            info.update({'vim_info':yaml.safe_dump(vm_info)})
+            faces = []
+            i = 0
+            for intf_name in vm_info.get('hypervisor_info').get('network',[]):
+                intf_info = vm_info.get('hypervisor_info').get('network').get(intf_name)
+                face = {}
+                face['compute_node'] = nid
+                face['vim_info'] = yaml.safe_dump(intf_info)
+                face['mac_address'] = intf_info.get('hwaddr')
+                addrs = []
+                for a in intf_info.get('addresses'):
+                    addrs.append(a.get('address'))
+                if len(addrs) >= 0:
+                    face['ip_address'] = ','.join(addrs)
+                else:
+                    face['ip_address'] = ''
+                face['pci'] = '0:0:0.0'
+                # getting net id by CP
+                try:
+                    cp_info = vm_info.get('connection_points')[i]
+                except IndexError:
+                    cp_info = None
+                if cp_info is not None:
+                    cp_id = cp_info['cp_uuid']
+                    cps_d = desc['connection_points']
+                    matches = [x for x in cps_d if x['uuid'] == cp_id]
+                    if len(matches) > 0:
+                        cpd = matches[0]
+                        face['vim_net_id'] = cpd.get('pair_id','')
+                    else:
+                        face['vim_net_id'] = ''
+                    face['vim_interface_id'] = cp_id
+                    # cp_info.get('uuid')
+                else:
+                    face['vim_net_id'] = ''
+                    face['vim_interface_id'] = intf_name
+                faces.append(face)
+                i += 1
+
+
+
+            info.update({'interfaces':faces})
+            r.update({vm:info})
+            self.logger.debug('FOS refresh_vms_status res for {} is {}'.format(vm, info))
+        self.logger.debug('FOS refresh_vms_status res is {}'.format(r))
+        return r
+
+
+        #raise vimconnNotImplemented( "Should have implemented this" )
+
+    def action_vminstance(self, vm_id, action_dict, created_items={}):
+        """
+        Send and action over a VM instance. Returns created_items if the action was successfully sent to the VIM.
+        created_items is a dictionary with items that
+        :param vm_id: VIM identifier of the VM, provided by method new_vminstance
+        :param action_dict: dictionary with the action to perform
+        :param created_items: provided by method new_vminstance is a dictionary with key-values that will be passed to
+            the method delete_vminstance. Can be used to store created ports, volumes, etc. Format is vimconnector
+            dependent, but do not use nested dictionaries and a value of None should be the same as not present. This
+            method can modify this value
+        :return: None, or a console dict
+        """
+        self.logger.debug('VIM action_vminstance with args: {}'.format(locals()))
+        nid = self.fdu_node_map.get(vm_id)
+        if nid is None:
+            raise vimconn.vimconnNotFoundException('No node for this VM')
+        try:
+            fdu_info = self.fos_api.fdu.instance_info(vm_id)
+            if "start" in action_dict:
+                if fdu_info.get('status') == 'CONFIGURE':
+                    self.fos_api.fdu.start(vm_id)
+                elif fdu_info.get('status') == 'PAUSE':
+                    self.fos_api.fdu.resume(vm_id)
+                else:
+                    raise vimconn.vimconnConflictException("Cannot start from this state")
+            elif "pause" in action_dict:
+                if fdu_info.get('status') == 'RUN':
+                    self.fos_api.fdu.pause(vm_id)
+                else:
+                    raise vimconn.vimconnConflictException("Cannot pause from this state")
+            elif "resume" in action_dict:
+                if fdu_info.get('status') == 'PAUSE':
+                    self.fos_api.fdu.resume(vm_id)
+                else:
+                    raise vimconn.vimconnConflictException("Cannot resume from this state")
+            elif "shutoff" in action_dict or "shutdown" or "forceOff" in action_dict:
+                if fdu_info.get('status') == 'RUN':
+                    self.fos_api.fdu.stop(vm_id)
+                else:
+                    raise vimconn.vimconnConflictException("Cannot shutoff from this state")
+            elif "terminate" in action_dict:
+                if fdu_info.get('status') == 'RUN':
+                    self.fos_api.fdu.stop(vm_id)
+                    self.fos_api.fdu.clean(vm_id)
+                    self.fos_api.fdu.undefine(vm_id)
+                    # self.fos_api.fdu.offload(vm_id)
+                elif fdu_info.get('status') == 'CONFIGURE':
+                    self.fos_api.fdu.clean(vm_id)
+                    self.fos_api.fdu.undefine(vm_id)
+                    # self.fos_api.fdu.offload(vm_id)
+                elif fdu_info.get('status') == 'PAUSE':
+                    self.fos_api.fdu.resume(vm_id)
+                    self.fos_api.fdu.stop(vm_id)
+                    self.fos_api.fdu.clean(vm_id)
+                    self.fos_api.fdu.undefine(vm_id)
+                    # self.fos_api.fdu.offload(vm_id)
+                else:
+                    raise vimconn.vimconnConflictException("Cannot terminate from this state")
+            elif "rebuild" in action_dict:
+                raise vimconnNotImplemented("Rebuild not implememnted")
+            elif "reboot" in action_dict:
+                if fdu_info.get('status') == 'RUN':
+                    self.fos_api.fdu.stop(vm_id)
+                    self.fos_api.fdu.start(vm_id)
+                else:
+                    raise vimconn.vimconnConflictException("Cannot reboot from this state")
+        except Exception as e:
+            raise vimconn.vimconnConnectionException("VIM not reachable. Error {}".format(e))
diff --git a/RO-VIM-fos/requirements.txt b/RO-VIM-fos/requirements.txt
new file mode 100644 (file)
index 0000000..d75501b
--- /dev/null
@@ -0,0 +1,21 @@
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+PyYAML
+requests
+netaddr
+fog05rest>=0.0.4
+git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro
+
diff --git a/RO-VIM-fos/setup.py b/RO-VIM-fos/setup.py
new file mode 100644 (file)
index 0000000..95d97ca
--- /dev/null
@@ -0,0 +1,55 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from setuptools import setup
+
+_name = "osm_rovim_fos"
+
+README = """
+===========
+osm-rovim_fos
+===========
+
+osm-ro pluging for fos VIM
+"""
+
+setup(
+    name=_name,
+    description='OSM ro vim plugin for fos',
+    long_description=README,
+    version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'),
+    # version=VERSION,
+    # python_requires='>3.5.0',
+    author='ETSI OSM',
+    # TODO py3 author_email='',
+    maintainer='OSM_TECH@LIST.ETSI.ORG',  # TODO py3
+    # TODO py3 maintainer_email='',
+    url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary',
+    license='Apache 2.0',
+
+    packages=[_name],
+    include_package_data=True,
+    dependency_links=["git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro"],
+    install_requires=[
+        "requests", "netaddr", "PyYAML", "osm-ro", "fog05rest>=0.0.4"
+    ],
+    setup_requires=['setuptools-version-command'],
+    entry_points={
+        'osm_rovim.plugins': ['rovim_fos = osm_rovim_fos.vimconn_fos'],
+    },
+)
diff --git a/RO-VIM-fos/stdeb.cfg b/RO-VIM-fos/stdeb.cfg
new file mode 100644 (file)
index 0000000..cf4b353
--- /dev/null
@@ -0,0 +1,18 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+[DEFAULT]
+X-Python3-Version : >= 3.5
+Depends3: python3-pip, python3-requests, python3-netaddr, python3-yaml, python3-osm-ro
diff --git a/RO-VIM-fos/tox.ini b/RO-VIM-fos/tox.ini
new file mode 100644 (file)
index 0000000..297800b
--- /dev/null
@@ -0,0 +1,41 @@
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+[tox]
+envlist = py3
+toxworkdir={homedir}/.tox
+
+[testenv]
+basepython = python3
+install_command = python3 -m pip install -r requirements.txt -U {opts} {packages}
+# deps = -r{toxinidir}/test-requirements.txt
+commands=python3 -m unittest discover -v
+
+[testenv:flake8]
+basepython = python3
+deps = flake8
+commands = flake8 osm_rovim_fos --max-line-length 120 \
+    --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504
+
+[testenv:unittest]
+basepython = python3
+commands = python3 -m unittest osm_rovim_fos.tests
+
+[testenv:build]
+basepython = python3
+deps = stdeb
+       setuptools-version-command
+commands = python3 setup.py --command-packages=stdeb.command bdist_deb
+
diff --git a/RO-VIM-opennebula/Makefile b/RO-VIM-opennebula/Makefile
new file mode 100644 (file)
index 0000000..2ec6a44
--- /dev/null
@@ -0,0 +1,26 @@
+##
+# Copyright 2017  Telefonica Digital Spain S.L.U.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+all: clean package
+
+clean:
+       rm -rf dist deb_dist osm_rovim_opennebula-*.tar.gz osm_rovim_opennebula.egg-info .eggs
+
+package:
+       python3 setup.py --command-packages=stdeb.command sdist_dsc
+       cp debian/python3-osm-rovim-opennebula.postinst deb_dist/osm-rovim-opennebula*/debian/
+       cd deb_dist/osm-rovim-opennebula*/ && dpkg-buildpackage -rfakeroot -uc -us
+
diff --git a/RO-VIM-opennebula/debian/python3-osm-rovim-opennebula.postinst b/RO-VIM-opennebula/debian/python3-osm-rovim-opennebula.postinst
new file mode 100755 (executable)
index 0000000..27aacc7
--- /dev/null
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: OSM_TECH@list.etsi.org
+##
+
+echo "POST INSTALL OSM-ROVIM-OPENNEBULA"
+
+#Pip packages required for opennebula connector
+python3 -m pip install -e git+https://github.com/python-oca/python-oca#egg=oca
+python3 -m pip install untangle
+python3 -m pip install pyone
+
diff --git a/RO-VIM-opennebula/osm_rovim_opennebula/vimconn_opennebula.py b/RO-VIM-opennebula/osm_rovim_opennebula/vimconn_opennebula.py
new file mode 100644 (file)
index 0000000..440b1cb
--- /dev/null
@@ -0,0 +1,684 @@
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2017  Telefonica Digital Spain S.L.U.
+# This file is part of ETSI OSM
+#  All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: patent-office@telefonica.com
+##
+
+"""
+vimconnector implements all the methods to interact with OpenNebula using the XML-RPC API.
+"""
+__author__ = "Jose Maria Carmona Perez,Juan Antonio Hernando Labajo, Emilio Abraham Garrido Garcia,Alberto Florez " \
+             "Pages, Andres Pozo Munoz, Santiago Perez Marin, Onlife Networks Telefonica I+D Product Innovation "
+__date__ = "$13-dec-2017 11:09:29$"
import logging
import math
import random
import time

import requests

import oca
import pyone
import untangle

from osm_ro import vimconn
+
+class vimconnector(vimconn.vimconnector):
+    def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None,
+                 log_level="DEBUG", config={}, persistent_info={}):
+
+        """Constructor of VIM
+        Params:
+            'uuid': id asigned to this VIM
+            'name': name assigned to this VIM, can be used for logging
+            'tenant_id', 'tenant_name': (only one of them is mandatory) VIM tenant to be used
+            'url_admin': (optional), url used for administrative tasks
+            'user', 'passwd': credentials of the VIM user
+            'log_level': provider if it should use a different log_level than the general one
+            'config': dictionary with extra VIM information. This contains a consolidate version of general VIM config
+                    at creation and particular VIM config at teh attachment
+            'persistent_info': dict where the class can store information that will be available among class
+                    destroy/creation cycles. This info is unique per VIM/credential. At first call it will contain an
+                    empty dict. Useful to store login/tokens information for speed up communication
+
+        Returns: Raise an exception is some needed parameter is missing, but it must not do any connectivity
+            check against the VIM
+        """
+
+        vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level,
+                                      config)
+
+    def _new_one_connection(self):
+        return pyone.OneServer(self.url, session=self.user + ':' + self.passwd)
+
+    def new_tenant(self, tenant_name, tenant_description):
+        # '''Adds a new tenant to VIM with this name and description, returns the tenant identifier'''
+        try:
+            client = oca.Client(self.user + ':' + self.passwd, self.url)
+            group_list = oca.GroupPool(client)
+            user_list = oca.UserPool(client)
+            group_list.info()
+            user_list.info()
+            create_primarygroup = 1
+            # create group-tenant
+            for group in group_list:
+                if str(group.name) == str(tenant_name):
+                    create_primarygroup = 0
+                    break
+            if create_primarygroup == 1:
+                oca.Group.allocate(client, tenant_name)
+            group_list.info()
+            # set to primary_group the tenant_group and oneadmin to secondary_group
+            for group in group_list:
+                if str(group.name) == str(tenant_name):
+                    for user in user_list:
+                        if str(user.name) == str(self.user):
+                            if user.name == "oneadmin":
+                                return str(0)
+                            else:
+                                self._add_secondarygroup(user.id, group.id)
+                                user.chgrp(group.id)
+                                return str(group.id)
+        except Exception as e:
+            self.logger.error("Create new tenant error: " + str(e))
+            raise vimconn.vimconnException(e)
+
+    def delete_tenant(self, tenant_id):
+        """Delete a tenant from VIM. Returns the old tenant identifier"""
+        try:
+            client = oca.Client(self.user + ':' + self.passwd, self.url)
+            group_list = oca.GroupPool(client)
+            user_list = oca.UserPool(client)
+            group_list.info()
+            user_list.info()
+            for group in group_list:
+                if str(group.id) == str(tenant_id):
+                    for user in user_list:
+                        if str(user.name) == str(self.user):
+                            self._delete_secondarygroup(user.id, group.id)
+                            group.delete(client)
+                    return None
+            raise vimconn.vimconnNotFoundException("Group {} not found".format(tenant_id))
+        except Exception as e:
+            self.logger.error("Delete tenant " + str(tenant_id) + " error: " + str(e))
+            raise vimconn.vimconnException(e)
+
+    def _add_secondarygroup(self, id_user, id_group):
+        # change secondary_group to primary_group
+        params = '<?xml version="1.0"?> \
+                   <methodCall>\
+                   <methodName>one.user.addgroup</methodName>\
+                   <params>\
+                   <param>\
+                   <value><string>{}:{}</string></value>\
+                   </param>\
+                   <param>\
+                   <value><int>{}</int></value>\
+                   </param>\
+                   <param>\
+                   <value><int>{}</int></value>\
+                   </param>\
+                   </params>\
+                   </methodCall>'.format(self.user, self.passwd, (str(id_user)), (str(id_group)))
+        requests.post(self.url, params)
+
+    def _delete_secondarygroup(self, id_user, id_group):
+        params = '<?xml version="1.0"?> \
+                   <methodCall>\
+                   <methodName>one.user.delgroup</methodName>\
+                   <params>\
+                   <param>\
+                   <value><string>{}:{}</string></value>\
+                   </param>\
+                   <param>\
+                   <value><int>{}</int></value>\
+                   </param>\
+                   <param>\
+                   <value><int>{}</int></value>\
+                   </param>\
+                   </params>\
+                   </methodCall>'.format(self.user, self.passwd, (str(id_user)), (str(id_group)))
+        requests.post(self.url, params)
+
+    def new_network(self, net_name, net_type, ip_profile=None, shared=False, vlan=None):  # , **vim_specific):
+        """Adds a tenant network to VIM
+        Params:
+            'net_name': name of the network
+            'net_type': one of:
+                'bridge': overlay isolated network
+                'data':   underlay E-LAN network for Passthrough and SRIOV interfaces
+                'ptp':    underlay E-LINE network for Passthrough and SRIOV interfaces.
+            'ip_profile': is a dict containing the IP parameters of the network
+                'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
+                'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
+                'gateway_address': (Optional) ip_schema, that is X.X.X.X
+                'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
+                'dhcp_enabled': True or False
+                'dhcp_start_address': ip_schema, first IP to grant
+                'dhcp_count': number of IPs to grant.
+            'shared': if this network can be seen/use by other tenants/organization
+            'vlan': in case of a data or ptp net_type, the intended vlan tag to be used for the network
+        Returns a tuple with the network identifier and created_items, or raises an exception on error
+            created_items can be None or a dictionary where this method can include key-values that will be passed to
+            the method delete_network. Can be used to store created segments, created l2gw connections, etc.
+            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+            as not present.
+        """
+
+        # oca library method cannot be used in this case (problem with cluster parameters)
+        try:
+            created_items = {}
+            one = self._new_one_connection()
+            size = "254"
+            if ip_profile is None:
+                subnet_rand = random.randint(0, 255)
+                ip_start = "192.168.{}.1".format(subnet_rand)
+            else:
+                index = ip_profile["subnet_address"].find("/")
+                ip_start = ip_profile["subnet_address"][:index]
+                if "dhcp_count" in ip_profile and ip_profile["dhcp_count"] is not None:
+                    size = str(ip_profile["dhcp_count"])
+                elif "dhcp_count" not in ip_profile and ip_profile["ip_version"] == "IPv4":
+                    prefix = ip_profile["subnet_address"][index + 1:]
+                    size = int(math.pow(2, 32 - prefix))
+                if "dhcp_start_address" in ip_profile and ip_profile["dhcp_start_address"] is not None:
+                    ip_start = str(ip_profile["dhcp_start_address"])
+                if ip_profile["ip_version"] == "IPv6":
+                    ip_prefix_type = "GLOBAL_PREFIX"
+
+            if vlan is not None:
+                vlan_id = vlan
+            else:
+                vlan_id = str(random.randint(100, 4095))
+            #if "internal" in net_name:
+            # OpenNebula not support two networks with same name
+            random_net_name = str(random.randint(1, 1000000))
+            net_name = net_name + random_net_name
+            net_id = one.vn.allocate({
+                        'NAME': net_name,
+                        'VN_MAD': '802.1Q',
+                        'PHYDEV': self.config["network"]["phydev"],
+                        'VLAN_ID': vlan_id
+                    }, self.config["cluster"]["id"])
+            arpool = {'AR_POOL': {
+                        'AR': {
+                            'TYPE': 'IP4',
+                            'IP': ip_start,
+                            'SIZE': size
+                        }
+                    }
+            }
+            one.vn.add_ar(net_id, arpool)
+            return net_id, created_items
+        except Exception as e:
+            self.logger.error("Create new network error: " + str(e))
+            raise vimconn.vimconnException(e)
+
+    def get_network_list(self, filter_dict={}):
+        """Obtain tenant networks of VIM
+        Params:
+            'filter_dict' (optional) contains entries to return only networks that matches ALL entries:
+                name: string  => returns only networks with this name
+                id:   string  => returns networks with this VIM id, this imply returns one network at most
+                shared: boolean >= returns only networks that are (or are not) shared
+                tenant_id: sting => returns only networks that belong to this tenant/project
+                ,#(not used yet) admin_state_up: boolean => returns only networks that are (or are not) in admin state active
+                #(not used yet) status: 'ACTIVE','ERROR',... => filter networks that are on this status
+        Returns the network list of dictionaries. each dictionary contains:
+            'id': (mandatory) VIM network id
+            'name': (mandatory) VIM network name
+            'status': (mandatory) can be 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
+            'network_type': (optional) can be 'vxlan', 'vlan' or 'flat'
+            'segmentation_id': (optional) in case network_type is vlan or vxlan this field contains the segmentation id
+            'error_msg': (optional) text that explains the ERROR status
+            other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
+        List can be empty if no network map the filter_dict. Raise an exception only upon VIM connectivity,
+            authorization, or some other unspecific error
+        """
+
+        try:
+            one = self._new_one_connection()
+            net_pool = one.vnpool.info(-2, -1, -1).VNET
+            response = []
+            if "name" in filter_dict:
+                network_name_filter = filter_dict["name"]
+            else:
+                network_name_filter = None
+            if "id" in filter_dict:
+                network_id_filter = filter_dict["id"]
+            else:
+                network_id_filter = None
+            for network in net_pool:
+                if network.NAME == network_name_filter or str(network.ID) == str(network_id_filter):
+                    net_dict = {"name": network.NAME, "id": str(network.ID), "status": "ACTIVE"}
+                    response.append(net_dict)
+            return response
+        except Exception as e:
+            self.logger.error("Get network list error: " + str(e))
+            raise vimconn.vimconnException(e)
+
+    def get_network(self, net_id):
+        """Obtain network details from the 'net_id' VIM network
+        Return a dict that contains:
+            'id': (mandatory) VIM network id, that is, net_id
+            'name': (mandatory) VIM network name
+            'status': (mandatory) can be 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
+            'error_msg': (optional) text that explains the ERROR status
+            other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
+        Raises an exception upon error or when network is not found
+        """
+        try:
+            one = self._new_one_connection()
+            net_pool = one.vnpool.info(-2, -1, -1).VNET
+            net = {}
+            for network in net_pool:
+                if str(network.ID) == str(net_id):
+                    net['id'] = network.ID
+                    net['name'] = network.NAME
+                    net['status'] = "ACTIVE"
+                    break
+            if net:
+                return net
+            else:
+                raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
+        except Exception as e:
+            self.logger.error("Get network " + str(net_id) + " error): " + str(e))
+            raise vimconn.vimconnException(e)
+
+    def delete_network(self, net_id, created_items=None):
+        """
+        Removes a tenant network from VIM and its associated elements
+        :param net_id: VIM identifier of the network, provided by method new_network
+        :param created_items: dictionary with extra items to be deleted. provided by method new_network
+        Returns the network identifier or raises an exception upon error or when network is not found
+        """
+        try:
+
+            one = self._new_one_connection()
+            one.vn.delete(int(net_id))
+            return net_id
+        except Exception as e:
+            self.logger.error("Delete network " + str(net_id) + "error: network not found" + str(e))
+            raise vimconn.vimconnException(e)
+
+    def refresh_nets_status(self, net_list):
+        """Get the status of the networks
+        Params:
+            'net_list': a list with the VIM network id to be get the status
+        Returns a dictionary with:
+            'net_id':         #VIM id of this network
+                status:     #Mandatory. Text with one of:
+                    #  DELETED (not found at vim)
+                    #  VIM_ERROR (Cannot connect to VIM, authentication problems, VIM response error, ...)
+                    #  OTHER (Vim reported other status not understood)
+                    #  ERROR (VIM indicates an ERROR status)
+                    #  ACTIVE, INACTIVE, DOWN (admin down),
+                    #  BUILD (on building process)
+                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
+                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
+            'net_id2': ...
+        """
+        net_dict = {}
+        try:
+            for net_id in net_list:
+                net = {}
+                try:
+                    net_vim = self.get_network(net_id)
+                    net["status"] = net_vim["status"]
+                    net["vim_info"] = None
+                except vimconn.vimconnNotFoundException as e:
+                    self.logger.error("Exception getting net status: {}".format(str(e)))
+                    net['status'] = "DELETED"
+                    net['error_msg'] = str(e)
+                except vimconn.vimconnException as e:
+                    self.logger.error(e)
+                    net["status"] = "VIM_ERROR"
+                    net["error_msg"] = str(e)
+                net_dict[net_id] = net
+            return net_dict
+        except vimconn.vimconnException as e:
+            self.logger.error(e)
+            for k in net_dict:
+                net_dict[k]["status"] = "VIM_ERROR"
+                net_dict[k]["error_msg"] = str(e)
+            return net_dict
+
+    def get_flavor(self, flavor_id):  # Esta correcto
+        """Obtain flavor details from the VIM
+        Returns the flavor dict details {'id':<>, 'name':<>, other vim specific }
+        Raises an exception upon error or if not found
+        """
+        try:
+
+            one = self._new_one_connection()
+            template = one.template.info(int(flavor_id))
+            if template is not None:
+                return {'id': template.ID, 'name': template.NAME}
+            raise vimconn.vimconnNotFoundException("Flavor {} not found".format(flavor_id))
+        except Exception as e:
+            self.logger.error("get flavor " + str(flavor_id) + " error: " + str(e))
+            raise vimconn.vimconnException(e)
+
+    def new_flavor(self, flavor_data):
+        """Adds a tenant flavor to VIM
+            flavor_data contains a dictionary with information, keys:
+                name: flavor name
+                ram: memory (cloud type) in MBytes
+                vpcus: cpus (cloud type)
+                extended: EPA parameters
+                  - numas: #items requested in same NUMA
+                        memory: number of 1G huge pages memory
+                        paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads
+                        interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
+                          - name: interface name
+                            dedicated: yes|no|yes:sriov;  for PT, SRIOV or only one SRIOV for the physical NIC
+                            bandwidth: X Gbps; requested guarantee bandwidth
+                            vpci: requested virtual PCI address
+                disk: disk size
+                is_public:
+                 #TODO to concrete
+        Returns the flavor identifier"""
+
+        disk_size = str(int(flavor_data["disk"])*1024)
+
+        try:
+            one = self._new_one_connection()
+            template_id = one.template.allocate({
+                'TEMPLATE': {
+                    'NAME': flavor_data["name"],
+                    'CPU': flavor_data["vcpus"],
+                    'VCPU': flavor_data["vcpus"],
+                    'MEMORY': flavor_data["ram"],
+                    'DISK': {
+                        'SIZE': disk_size
+                    },
+                    'CONTEXT': {
+                        'NETWORK': "YES",
+                        'SSH_PUBLIC_KEY': '$USER[SSH_PUBLIC_KEY]'
+                    },
+                    'GRAPHICS': {
+                        'LISTEN': '0.0.0.0',
+                        'TYPE': 'VNC'
+                    },
+                    'CLUSTER_ID': self.config["cluster"]["id"]
+                }
+            })
+            return template_id
+
+        except Exception as e:
+            self.logger.error("Create new flavor error: " + str(e))
+            raise vimconn.vimconnException(e)
+
+    def delete_flavor(self, flavor_id):
+        """ Deletes a tenant flavor from VIM
+            Returns the old flavor_id
+        """
+        try:
+            one = self._new_one_connection()
+            one.template.delete(int(flavor_id), False)
+            return flavor_id
+        except Exception as e:
+            self.logger.error("Error deleting flavor " + str(flavor_id) + ". Flavor not found")
+            raise vimconn.vimconnException(e)
+
+    def get_image_list(self, filter_dict={}):
+        """Obtain tenant images from VIM
+        Filter_dict can be:
+            name: image name
+            id: image uuid
+            checksum: image checksum
+            location: image path
+        Returns the image list of dictionaries:
+            [{<the fields at Filter_dict plus some VIM specific>}, ...]
+            List can be empty
+        """
+        try:
+            one = self._new_one_connection()
+            image_pool = one.imagepool.info(-2, -1, -1).IMAGE
+            images = []
+            if "name" in filter_dict:
+                image_name_filter = filter_dict["name"]
+            else:
+                image_name_filter = None
+            if "id" in filter_dict:
+                image_id_filter = filter_dict["id"]
+            else:
+                image_id_filter = None
+            for image in image_pool:
+                if str(image_name_filter) == str(image.NAME) or str(image.ID) == str(image_id_filter):
+                    images_dict = {"name": image.NAME, "id": str(image.ID)}
+                    images.append(images_dict)
+            return images
+        except Exception as e:
+            self.logger.error("Get image list error: " + str(e))
+            raise vimconn.vimconnException(e)
+
+    def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None, disk_list=None,
+                       availability_zone_index=None, availability_zone_list=None):
+
+        """Adds a VM instance to VIM
+            Params:
+                'start': (boolean) indicates if VM must start or created in pause mode.
+                'image_id','flavor_id': image and flavor VIM id to use for the VM
+                'net_list': list of interfaces, each one is a dictionary with:
+                    'name': (optional) name for the interface.
+                    'net_id': VIM network id where this interface must be connect to. Mandatory for type==virtual
+                    'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities
+                    'model': (optional and only have sense for type==virtual) interface model: virtio, e1000, ...
+                    'mac_address': (optional) mac address to assign to this interface
+                    'ip_address': (optional) IP address to assign to this interface
+                    #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type if VF and net_id is not provided,
+                        the VLAN tag to be used. In case net_id is provided, the internal network vlan is used for tagging VF
+                    'type': (mandatory) can be one of:
+                        'virtual', in this case always connected to a network of type 'net_type=bridge'
+                        'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp network ot it
+                            can created unconnected
+                        'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
+                        'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
+                                are allocated on the same physical NIC
+                    'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
+                    'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing
+                                    or True, it must apply the default VIM behaviour
+                    After execution the method will add the key:
+                    'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this
+                            interface. 'net_list' is modified
+                'cloud_config': (optional) dictionary with:
+                    'key-pairs': (optional) list of strings with the public key to be inserted to the default user
+                    'users': (optional) list of users to be inserted, each item is a dict with:
+                        'name': (mandatory) user name,
+                        'key-pairs': (optional) list of strings with the public key to be inserted to the user
+                    'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
+                        or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
+                    'config-files': (optional). List of files to be transferred. Each item is a dict with:
+                        'dest': (mandatory) string with the destination absolute path
+                        'encoding': (optional, by default text). Can be one of:
+                            'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
+                        'content' (mandatory): string with the content of the file
+                        'permissions': (optional) string with file permissions, typically octal notation '0644'
+                        'owner': (optional) file owner, string with the format 'owner:group'
+                    'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
+                'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
+                    'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
+                    'size': (mandatory) string with the size of the disk in GB
+                availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
+                availability_zone_list: list of availability zones given by user in the VNFD descriptor.  Ignore if
+                    availability_zone_index is None
+            Returns a tuple with the instance identifier and created_items or raises an exception on error
+                created_items can be None or a dictionary where this method can include key-values that will be passed to
+                the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
+                Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+                as not present.
+            """
+        self.logger.debug(
+            "new_vminstance input: image='{}' flavor='{}' nics='{}'".format(image_id, flavor_id, str(net_list)))
+        try:
+            one = self._new_one_connection()
+            template_vim = one.template.info(int(flavor_id), True)
+            disk_size = str(template_vim.TEMPLATE["DISK"]["SIZE"])
+
+            one = self._new_one_connection()
+            template_updated = ""
+            for net in net_list:
+                net_in_vim = one.vn.info(int(net["net_id"]))
+                net["vim_id"] = str(net_in_vim.ID)
+                network = 'NIC = [NETWORK = "{}",NETWORK_UNAME = "{}" ]'.format(
+                    net_in_vim.NAME, net_in_vim.UNAME)
+                template_updated += network
+
+            template_updated += "DISK = [ IMAGE_ID = {},\n  SIZE = {}]".format(image_id, disk_size)
+
+            if isinstance(cloud_config, dict):
+                if cloud_config.get("key-pairs"):
+                    context = 'CONTEXT = [\n  NETWORK = "YES",\n  SSH_PUBLIC_KEY = "'
+                    for key in cloud_config["key-pairs"]:
+                        context += key + '\n'
+                    # if False:
+                    #     context += '"\n  USERNAME = '
+                    context += '"]'
+                    template_updated += context
+
+            vm_instance_id = one.template.instantiate(int(flavor_id), name, False, template_updated)
+            self.logger.info(
+                "Instanciating in OpenNebula a new VM name:{} id:{}".format(name, flavor_id))
+            return str(vm_instance_id), None
+        except pyone.OneNoExistsException as e:
+            self.logger.error("Network with id " + str(e) + " not found: " + str(e))
+            raise vimconn.vimconnNotFoundException(e)
+        except Exception as e:
+            self.logger.error("Create new vm instance error: " + str(e))
+            raise vimconn.vimconnException(e)
+
+    def get_vminstance(self, vm_id):
+        """Returns the VM instance information from VIM"""
+        try:
+            one = self._new_one_connection()
+            vm = one.vm.info(int(vm_id))
+            return vm
+        except Exception as e:
+            self.logger.error("Getting vm instance error: " + str(e) + ": VM Instance not found")
+            raise vimconn.vimconnException(e)
+
+    def delete_vminstance(self, vm_id, created_items=None):
+        """
+        Removes a VM instance from VIM and its associated elements
+        :param vm_id: VIM identifier of the VM, provided by method new_vminstance
+        :param created_items: dictionary with extra items to be deleted. provided by method new_vminstance and/or method
+            action_vminstance
+        :return: None or the same vm_id. Raises an exception on fail
+        """
+        try:
+            one = self._new_one_connection()
+            one.vm.recover(int(vm_id), 3)
+            vm = None
+            while True:
+                if vm is not None and vm.LCM_STATE == 0:
+                    break
+                else:
+                    vm = one.vm.info(int(vm_id))
+
+        except pyone.OneNoExistsException as e:
+            self.logger.info("The vm " + str(vm_id) + " does not exist or is already deleted")
+            raise vimconn.vimconnNotFoundException("The vm {} does not exist or is already deleted".format(vm_id))
+        except Exception as e:
+            self.logger.error("Delete vm instance " + str(vm_id) + " error: " + str(e))
+            raise vimconn.vimconnException(e)
+
+    def refresh_vms_status(self, vm_list):
+        """Get the status of the virtual machines and their interfaces/ports
+           Params: the list of VM identifiers
+           Returns a dictionary with:
+                vm_id:          #VIM id of this Virtual Machine
+                    status:     #Mandatory. Text with one of:
+                                #  DELETED (not found at vim)
+                                #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+                                #  OTHER (Vim reported other status not understood)
+                                #  ERROR (VIM indicates an ERROR status)
+                                #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
+                                #  BUILD (on building process), ERROR
+                                #  ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
+                                #
+                    error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
+                    vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
+                    interfaces: list with interface info. Each item a dictionary with:
+                        vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
+                        mac_address:      #Text format XX:XX:XX:XX:XX:XX
+                        vim_net_id:       #network id where this interface is connected, if provided at creation
+                        vim_interface_id: #interface/port VIM id
+                        ip_address:       #null, or text with IPv4, IPv6 address
+                        compute_node:     #identification of compute node where PF,VF interface is allocated
+                        pci:              #PCI address of the NIC that hosts the PF,VF
+                        vlan:             #physical VLAN used for VF
+        """
+        vm_dict = {}
+        try:
+            for vm_id in vm_list:
+                vm = {}
+                if self.get_vminstance(vm_id) is not None:
+                    vm_element = self.get_vminstance(vm_id)
+                else:
+                    self.logger.info("The vm " + str(vm_id) + " does not exist.")
+                    vm['status'] = "DELETED"
+                    vm['error_msg'] = ("The vm " + str(vm_id) + " does not exist.")
+                    continue
+                vm["vim_info"] = None
+                vm_status = vm_element.LCM_STATE
+                if vm_status == 3:
+                    vm['status'] = "ACTIVE"
+                elif vm_status == 36:
+                    vm['status'] = "ERROR"
+                    vm['error_msg'] = "VM failure"
+                else:
+                    vm['status'] = "BUILD"
+
+                if vm_element is not None:
+                    interfaces = self._get_networks_vm(vm_element)
+                    vm["interfaces"] = interfaces
+                vm_dict[vm_id] = vm
+            return vm_dict
+        except Exception as e:
+            self.logger.error(e)
+            for k in vm_dict:
+                vm_dict[k]["status"] = "VIM_ERROR"
+                vm_dict[k]["error_msg"] = str(e)
+            return vm_dict
+
+    def _get_networks_vm(self, vm_element):
+        interfaces = []
+        try:
+            if isinstance(vm_element.TEMPLATE["NIC"], list):
+                for net in vm_element.TEMPLATE["NIC"]:
+                    interface = {'vim_info': None, "mac_address": str(net["MAC"]), "vim_net_id": str(net["NETWORK_ID"]),
+                                 "vim_interface_id": str(net["NETWORK_ID"])}
+                    # maybe it should be 2 different keys for ip_address if an interface has ipv4 and ipv6
+                    if u'IP' in net:
+                        interface["ip_address"] = str(net["IP"])
+                    if u'IP6_GLOBAL' in net:
+                        interface["ip_address"] = str(net["IP6_GLOBAL"])
+                    interfaces.append(interface)
+            else:
+                net = vm_element.TEMPLATE["NIC"]
+                interface = {'vim_info': None, "mac_address": str(net["MAC"]), "vim_net_id": str(net["NETWORK_ID"]),
+                             "vim_interface_id": str(net["NETWORK_ID"])}
+                # maybe it should be 2 different keys for ip_address if an interface has ipv4 and ipv6
+                if u'IP' in net:
+                    interface["ip_address"] = str(net["IP"])
+                if u'IP6_GLOBAL' in net:
+                    interface["ip_address"] = str(net["IP6_GLOBAL"])
+                interfaces.append(interface)
+            return interfaces
+        except Exception as e:
+            self.logger.error("Error getting vm interface_information of vm_id: " + str(vm_element.ID))
diff --git a/RO-VIM-opennebula/requirements.txt b/RO-VIM-opennebula/requirements.txt
new file mode 100644 (file)
index 0000000..cd2f803
--- /dev/null
@@ -0,0 +1,24 @@
+##
+# Copyright 2017  Telefonica Digital Spain S.L.U.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+PyYAML
+requests
+netaddr
+untangle
+pyone
+git+https://github.com/python-oca/python-oca#egg=oca
+git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro
+
diff --git a/RO-VIM-opennebula/setup.py b/RO-VIM-opennebula/setup.py
new file mode 100644 (file)
index 0000000..c27bca3
--- /dev/null
@@ -0,0 +1,54 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2017  Telefonica Digital Spain S.L.U.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from setuptools import setup
+
+_name = "osm_rovim_opennebula"
+
+README = """
+===========
+osm-rovim_opennebula
+===========
+
+osm-ro pluging for opennebula VIM
+"""
+
+setup(
+    name=_name,
+    description='OSM ro vim plugin for opennebula',
+    long_description=README,
+    version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'),
+    # version=VERSION,
+    # python_requires='>3.5.0',
+    author='ETSI OSM',
+    # TODO py3 author_email='',
+    maintainer='OSM_TECH@LIST.ETSI.ORG',  # TODO py3
+    # TODO py3 maintainer_email='',
+    url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary',
+    license='Apache 2.0',
+
+    packages=[_name],
+    include_package_data=True,
+    dependency_links=["git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro"],
+    install_requires=["requests", "netaddr", "PyYAML", "osm-ro",],
+    setup_requires=['setuptools-version-command'],
+    entry_points={
+        'osm_rovim.plugins': ['rovim_opennebula = osm_rovim_opennebula.vimconn_opennebula'],
+    },
+)
diff --git a/RO-VIM-opennebula/stdeb.cfg b/RO-VIM-opennebula/stdeb.cfg
new file mode 100644 (file)
index 0000000..00071bd
--- /dev/null
@@ -0,0 +1,20 @@
+#
+# Copyright 2017  Telefonica Digital Spain S.L.U.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+[DEFAULT]
+X-Python3-Version : >= 3.5
+Depends3: python3-requests, python3-netaddr, python3-yaml, python3-osm-ro, python3-pip
+
diff --git a/RO-VIM-opennebula/tox.ini b/RO-VIM-opennebula/tox.ini
new file mode 100644 (file)
index 0000000..6fb9d37
--- /dev/null
@@ -0,0 +1,42 @@
+##
+# Copyright 2017  Telefonica Digital Spain S.L.U.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+[tox]
+envlist = py3
+toxworkdir={homedir}/.tox
+
+[testenv]
+basepython = python3
+install_command = python3 -m pip install -r requirements.txt -U {opts} {packages}
+# deps = -r{toxinidir}/test-requirements.txt
+commands=python3 -m unittest discover -v
+
+[testenv:flake8]
+basepython = python3
+deps = flake8
+commands = flake8 osm_rovim_opennebula --max-line-length 120 \
+    --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504
+
+[testenv:unittest]
+basepython = python3
+commands = python3 -m unittest osm_rovim_opennebula.tests
+
+[testenv:build]
+basepython = python3
+deps = stdeb
+       setuptools-version-command
+commands = python3 setup.py --command-packages=stdeb.command bdist_deb
+
diff --git a/RO-VIM-openstack/Makefile b/RO-VIM-openstack/Makefile
new file mode 100644 (file)
index 0000000..dfafea3
--- /dev/null
@@ -0,0 +1,25 @@
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+all: clean package
+
+clean:
+       rm -rf dist deb_dist osm_rovim_openstack-*.tar.gz osm_rovim_openstack.egg-info .eggs
+
+package:
+       python3 setup.py --command-packages=stdeb.command sdist_dsc
+       cp debian/python3-osm-rovim-openstack.postinst deb_dist/osm-rovim-openstack*/debian/
+       cd deb_dist/osm-rovim-openstack*/ && dpkg-buildpackage -rfakeroot -uc -us
+
diff --git a/RO-VIM-openstack/debian/python3-osm-rovim-openstack.postinst b/RO-VIM-openstack/debian/python3-osm-rovim-openstack.postinst
new file mode 100755 (executable)
index 0000000..055d4a5
--- /dev/null
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: OSM_TECH@list.etsi.org
+##
+
+echo "POST INSTALL OSM-ROVIM-OPENSTACK"
+
+# Pip packages required for the openstack connector; installed from PyPI at
+# package post-install time (presumably no Debian package exists — confirm)
+python3 -m pip install networking-l2gw
diff --git a/RO-VIM-openstack/osm_rovim_openstack/tests/test_vimconn_openstack.py b/RO-VIM-openstack/osm_rovim_openstack/tests/test_vimconn_openstack.py
new file mode 100644 (file)
index 0000000..5eb23f0
--- /dev/null
@@ -0,0 +1,854 @@
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2017 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+"""
+This module contains unit tests for the OpenStack VIM connector
+Run this directly with python3.
+"""
+
+import copy
+import unittest
+
+import mock
+from neutronclient.v2_0.client import Client
+
+from osm_ro import vimconn
+from osm_ro.vimconn_openstack import vimconnector
+
+
+__author__ = "Igor D.C."
+__date__ = "$23-aug-2017 23:59:59$"
+
+
+class TestSfcOperations(unittest.TestCase):
+    def setUp(self):
+        """Create a vimconnector with dummy credentials for each test."""
+        # instantiate dummy VIM connector so we can test it
+        self.vimconn = vimconnector(
+            '123', 'openstackvim', '456', '789', 'http://dummy.url', None,
+            'user', 'pass')
+
+    def _test_new_sfi(self, create_sfc_port_pair, sfc_encap,
+                      ingress_ports=['5311c75d-d718-4369-bbda-cdcc6da60fcc'],
+                      egress_ports=['230cdf1b-de37-4891-bc07-f9010cf1f967']):
+        # input to VIM connector
+        name = 'osm_sfi'
+        # + ingress_ports
+        # + egress_ports
+        # TODO(igordc): must be changed to NSH in Queens (MPLS is a workaround)
+        correlation = 'nsh'
+        if sfc_encap is not None:
+            if not sfc_encap:
+                correlation = None
+
+        # what OpenStack is assumed to respond (patch OpenStack's return value)
+        dict_from_neutron = {'port_pair': {
+            'id': '3d7ddc13-923c-4332-971e-708ed82902ce',
+            'name': name,
+            'description': '',
+            'tenant_id': '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c',
+            'project_id': '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c',
+            'ingress': ingress_ports[0] if len(ingress_ports) else None,
+            'egress': egress_ports[0] if len(egress_ports) else None,
+            'service_function_parameters': {'correlation': correlation}
+        }}
+        create_sfc_port_pair.return_value = dict_from_neutron
+
+        # what the VIM connector is expected to
+        # send to OpenStack based on the input
+        dict_to_neutron = {'port_pair': {
+            'name': name,
+            'ingress': '5311c75d-d718-4369-bbda-cdcc6da60fcc',
+            'egress': '230cdf1b-de37-4891-bc07-f9010cf1f967',
+            'service_function_parameters': {'correlation': correlation}
+        }}
+
+        # call the VIM connector
+        if sfc_encap is None:
+            result = self.vimconn.new_sfi(name, ingress_ports, egress_ports)
+        else:
+            result = self.vimconn.new_sfi(name, ingress_ports, egress_ports,
+                                          sfc_encap)
+
+        # assert that the VIM connector made the expected call to OpenStack
+        create_sfc_port_pair.assert_called_with(dict_to_neutron)
+        # assert that the VIM connector had the expected result / return value
+        self.assertEqual(result, dict_from_neutron['port_pair']['id'])
+
+    def _test_new_sf(self, create_sfc_port_pair_group):
+        # input to VIM connector
+        name = 'osm_sf'
+        instances = ['bbd01220-cf72-41f2-9e70-0669c2e5c4cd',
+                     '12ba215e-3987-4892-bd3a-d0fd91eecf98',
+                     'e25a7c79-14c8-469a-9ae1-f601c9371ffd']
+
+        # what OpenStack is assumed to respond (patch OpenStack's return value)
+        dict_from_neutron = {'port_pair_group': {
+            'id': '3d7ddc13-923c-4332-971e-708ed82902ce',
+            'name': name,
+            'description': '',
+            'tenant_id': '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c',
+            'project_id': '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c',
+            'port_pairs': instances,
+            'group_id': 1,
+            'port_pair_group_parameters': {
+                "lb_fields": [],
+                "ppg_n_tuple_mapping": {
+                    "ingress_n_tuple": {},
+                    "egress_n_tuple": {}
+                }}
+        }}
+        create_sfc_port_pair_group.return_value = dict_from_neutron
+
+        # what the VIM connector is expected to
+        # send to OpenStack based on the input
+        dict_to_neutron = {'port_pair_group': {
+            'name': name,
+            'port_pairs': ['bbd01220-cf72-41f2-9e70-0669c2e5c4cd',
+                           '12ba215e-3987-4892-bd3a-d0fd91eecf98',
+                           'e25a7c79-14c8-469a-9ae1-f601c9371ffd']
+        }}
+
+        # call the VIM connector
+        result = self.vimconn.new_sf(name, instances)
+
+        # assert that the VIM connector made the expected call to OpenStack
+        create_sfc_port_pair_group.assert_called_with(dict_to_neutron)
+        # assert that the VIM connector had the expected result / return value
+        self.assertEqual(result, dict_from_neutron['port_pair_group']['id'])
+
+    def _test_new_sfp(self, create_sfc_port_chain, sfc_encap, spi):
+        # input to VIM connector
+        name = 'osm_sfp'
+        classifications = ['2bd2a2e5-c5fd-4eac-a297-d5e255c35c19',
+                           '00f23389-bdfa-43c2-8b16-5815f2582fa8']
+        sfs = ['2314daec-c262-414a-86e3-69bb6fa5bc16',
+               'd8bfdb5d-195e-4f34-81aa-6135705317df']
+
+        # TODO(igordc): must be changed to NSH in Queens (MPLS is a workaround)
+        correlation = 'nsh'
+        chain_id = 33
+        if spi:
+            chain_id = spi
+
+        # what OpenStack is assumed to respond (patch OpenStack's return value)
+        dict_from_neutron = {'port_chain': {
+            'id': '5bc05721-079b-4b6e-a235-47cac331cbb6',
+            'name': name,
+            'description': '',
+            'tenant_id': '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c',
+            'project_id': '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c',
+            'chain_id': chain_id,
+            'flow_classifiers': classifications,
+            'port_pair_groups': sfs,
+            'chain_parameters': {'correlation': correlation}
+        }}
+        create_sfc_port_chain.return_value = dict_from_neutron
+
+        # what the VIM connector is expected to
+        # send to OpenStack based on the input
+        dict_to_neutron = {'port_chain': {
+            'name': name,
+            'flow_classifiers': ['2bd2a2e5-c5fd-4eac-a297-d5e255c35c19',
+                                 '00f23389-bdfa-43c2-8b16-5815f2582fa8'],
+            'port_pair_groups': ['2314daec-c262-414a-86e3-69bb6fa5bc16',
+                                 'd8bfdb5d-195e-4f34-81aa-6135705317df'],
+            'chain_parameters': {'correlation': correlation}
+        }}
+        if spi:
+            dict_to_neutron['port_chain']['chain_id'] = spi
+
+        # call the VIM connector
+        if sfc_encap is None:
+            if spi is None:
+                result = self.vimconn.new_sfp(name, classifications, sfs)
+            else:
+                result = self.vimconn.new_sfp(name, classifications, sfs,
+                                              spi=spi)
+        else:
+            if spi is None:
+                result = self.vimconn.new_sfp(name, classifications, sfs,
+                                              sfc_encap)
+            else:
+                result = self.vimconn.new_sfp(name, classifications, sfs,
+                                              sfc_encap, spi)
+
+        # assert that the VIM connector made the expected call to OpenStack
+        create_sfc_port_chain.assert_called_with(dict_to_neutron)
+        # assert that the VIM connector had the expected result / return value
+        self.assertEqual(result, dict_from_neutron['port_chain']['id'])
+
+    def _test_new_classification(self, create_sfc_flow_classifier, ctype):
+        # input to VIM connector
+        name = 'osm_classification'
+        definition = {'ethertype': 'IPv4',
+                      'logical_source_port':
+                          'aaab0ab0-1452-4636-bb3b-11dca833fa2b',
+                      'protocol': 'tcp',
+                      'source_ip_prefix': '192.168.2.0/24',
+                      'source_port_range_max': 99,
+                      'source_port_range_min': 50}
+
+        # what OpenStack is assumed to respond (patch OpenStack's return value)
+        dict_from_neutron = {'flow_classifier': copy.copy(definition)}
+        dict_from_neutron['flow_classifier'][
+            'id'] = '7735ec2c-fddf-4130-9712-32ed2ab6a372'
+        dict_from_neutron['flow_classifier']['name'] = name
+        dict_from_neutron['flow_classifier']['description'] = ''
+        dict_from_neutron['flow_classifier'][
+            'tenant_id'] = '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c'
+        dict_from_neutron['flow_classifier'][
+            'project_id'] = '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c'
+        create_sfc_flow_classifier.return_value = dict_from_neutron
+
+        # what the VIM connector is expected to
+        # send to OpenStack based on the input
+        dict_to_neutron = {'flow_classifier': copy.copy(definition)}
+        dict_to_neutron['flow_classifier']['name'] = 'osm_classification'
+
+        # call the VIM connector
+        result = self.vimconn.new_classification(name, ctype, definition)
+
+        # assert that the VIM connector made the expected call to OpenStack
+        create_sfc_flow_classifier.assert_called_with(dict_to_neutron)
+        # assert that the VIM connector had the expected result / return value
+        self.assertEqual(result, dict_from_neutron['flow_classifier']['id'])
+
+    @mock.patch.object(Client, 'create_sfc_flow_classifier')
+    def test_new_classification(self, create_sfc_flow_classifier):
+        """new_classification() accepts the legacy_flow_classifier type."""
+        self._test_new_classification(create_sfc_flow_classifier,
+                                      'legacy_flow_classifier')
+
+    @mock.patch.object(Client, 'create_sfc_flow_classifier')
+    def test_new_classification_unsupported_type(self, create_sfc_flow_classifier):
+        """new_classification() rejects an unknown classification type."""
+        self.assertRaises(vimconn.vimconnNotSupportedException,
+                          self._test_new_classification,
+                          create_sfc_flow_classifier, 'h265')
+
+    @mock.patch.object(Client, 'create_sfc_port_pair')
+    def test_new_sfi_with_sfc_encap(self, create_sfc_port_pair):
+        """new_sfi() with sfc_encap=True keeps the 'nsh' correlation."""
+        self._test_new_sfi(create_sfc_port_pair, True)
+
+    @mock.patch.object(Client, 'create_sfc_port_pair')
+    def test_new_sfi_without_sfc_encap(self, create_sfc_port_pair):
+        """new_sfi() with sfc_encap=False sends a null correlation."""
+        self._test_new_sfi(create_sfc_port_pair, False)
+
+    @mock.patch.object(Client, 'create_sfc_port_pair')
+    def test_new_sfi_default_sfc_encap(self, create_sfc_port_pair):
+        """new_sfi() without sfc_encap behaves like sfc_encap=True ('nsh')."""
+        self._test_new_sfi(create_sfc_port_pair, None)
+
+    @mock.patch.object(Client, 'create_sfc_port_pair')
+    def test_new_sfi_bad_ingress_ports(self, create_sfc_port_pair):
+        """new_sfi() rejects several ingress ports as well as zero of them."""
+        ingress_ports = ['5311c75d-d718-4369-bbda-cdcc6da60fcc',
+                         'a0273f64-82c9-11e7-b08f-6328e53f0fa7']
+        self.assertRaises(vimconn.vimconnNotSupportedException,
+                          self._test_new_sfi,
+                          create_sfc_port_pair, True, ingress_ports=ingress_ports)
+        ingress_ports = []
+        self.assertRaises(vimconn.vimconnNotSupportedException,
+                          self._test_new_sfi,
+                          create_sfc_port_pair, True, ingress_ports=ingress_ports)
+
+    @mock.patch.object(Client, 'create_sfc_port_pair')
+    def test_new_sfi_bad_egress_ports(self, create_sfc_port_pair):
+        """new_sfi() rejects several egress ports as well as zero of them."""
+        egress_ports = ['230cdf1b-de37-4891-bc07-f9010cf1f967',
+                        'b41228fe-82c9-11e7-9b44-17504174320b']
+        self.assertRaises(vimconn.vimconnNotSupportedException,
+                          self._test_new_sfi,
+                          create_sfc_port_pair, True, egress_ports=egress_ports)
+        egress_ports = []
+        self.assertRaises(vimconn.vimconnNotSupportedException,
+                          self._test_new_sfi,
+                          create_sfc_port_pair, True, egress_ports=egress_ports)
+
+    @mock.patch.object(vimconnector, 'get_sfi')
+    @mock.patch.object(Client, 'create_sfc_port_pair_group')
+    def test_new_sf(self, create_sfc_port_pair_group, get_sfi):
+        """new_sf() succeeds when every SFI reports sfc_encap=True."""
+        get_sfi.return_value = {'sfc_encap': True}
+        self._test_new_sf(create_sfc_port_pair_group)
+
+    @mock.patch.object(vimconnector, 'get_sfi')
+    @mock.patch.object(Client, 'create_sfc_port_pair_group')
+    def test_new_sf_inconsistent_sfc_encap(self, create_sfc_port_pair_group,
+                                           get_sfi):
+        """new_sf() raises vimconnNotSupportedException when an SFI reports an
+        unexpected sfc_encap value ('nsh' instead of a boolean)."""
+        get_sfi.return_value = {'sfc_encap': 'nsh'}
+        self.assertRaises(vimconn.vimconnNotSupportedException,
+                          self._test_new_sf, create_sfc_port_pair_group)
+
+    @mock.patch.object(Client, 'create_sfc_port_chain')
+    def test_new_sfp_with_sfc_encap(self, create_sfc_port_chain):
+        """new_sfp() with sfc_encap=True and no explicit SPI."""
+        self._test_new_sfp(create_sfc_port_chain, True, None)
+
+    @mock.patch.object(Client, 'create_sfc_port_chain')
+    def test_new_sfp_without_sfc_encap(self, create_sfc_port_chain):
+        """new_sfp() with sfc_encap=False, with and without an explicit SPI."""
+        self._test_new_sfp(create_sfc_port_chain, False, None)
+        self._test_new_sfp(create_sfc_port_chain, False, 25)
+
+    @mock.patch.object(Client, 'create_sfc_port_chain')
+    def test_new_sfp_default_sfc_encap(self, create_sfc_port_chain):
+        """new_sfp() without sfc_encap defaults to the 'nsh' correlation."""
+        self._test_new_sfp(create_sfc_port_chain, None, None)
+
+    @mock.patch.object(Client, 'create_sfc_port_chain')
+    def test_new_sfp_with_sfc_encap_spi(self, create_sfc_port_chain):
+        """new_sfp() with sfc_encap=True and an explicit SPI (25)."""
+        self._test_new_sfp(create_sfc_port_chain, True, 25)
+
+    @mock.patch.object(Client, 'create_sfc_port_chain')
+    def test_new_sfp_default_sfc_encap_spi(self, create_sfc_port_chain):
+        """new_sfp() with default sfc_encap and an explicit SPI (25)."""
+        self._test_new_sfp(create_sfc_port_chain, None, 25)
+
+    @mock.patch.object(Client, 'list_sfc_flow_classifiers')
+    def test_get_classification_list(self, list_sfc_flow_classifiers):
+        """get_classification_list() passes the filter through to OpenStack and
+        regroups each classifier's matching fields under 'definition'."""
+        # what OpenStack is assumed to return to the VIM connector
+        list_sfc_flow_classifiers.return_value = {'flow_classifiers': [
+            {'source_port_range_min': 2000,
+             'destination_ip_prefix': '192.168.3.0/24',
+             'protocol': 'udp',
+             'description': '',
+             'ethertype': 'IPv4',
+             'l7_parameters': {},
+             'source_port_range_max': 2000,
+             'destination_port_range_min': 3000,
+             'source_ip_prefix': '192.168.2.0/24',
+             'logical_destination_port': None,
+             'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+             'destination_port_range_max': None,
+             'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+             'logical_source_port': 'aaab0ab0-1452-4636-bb3b-11dca833fa2b',
+             'id': '22198366-d4e8-4d6b-b4d2-637d5d6cbb7d',
+             'name': 'fc1'}]}
+
+        # call the VIM connector
+        filter_dict = {'protocol': 'tcp', 'ethertype': 'IPv4'}
+        result = self.vimconn.get_classification_list(filter_dict.copy())
+
+        # assert that VIM connector called OpenStack with the expected filter
+        list_sfc_flow_classifiers.assert_called_with(**filter_dict)
+        # assert that the VIM connector successfully
+        # translated and returned the OpenStack result
+        self.assertEqual(result, [
+            {'id': '22198366-d4e8-4d6b-b4d2-637d5d6cbb7d',
+             'name': 'fc1',
+             'description': '',
+             'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+             'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+             'ctype': 'legacy_flow_classifier',
+             'definition': {
+                 'source_port_range_min': 2000,
+                 'destination_ip_prefix': '192.168.3.0/24',
+                 'protocol': 'udp',
+                 'ethertype': 'IPv4',
+                 'l7_parameters': {},
+                 'source_port_range_max': 2000,
+                 'destination_port_range_min': 3000,
+                 'source_ip_prefix': '192.168.2.0/24',
+                 'logical_destination_port': None,
+                 'destination_port_range_max': None,
+                 'logical_source_port': 'aaab0ab0-1452-4636-bb3b-11dca833fa2b'}
+             }])
+
+    def _test_get_sfi_list(self, list_port_pair, correlation, sfc_encap):
+        """Exercise get_sfi_list(): a port pair with the given correlation is
+        expected to be translated to an SFI with the given sfc_encap value."""
+        # what OpenStack is assumed to return to the VIM connector
+        list_port_pair.return_value = {'port_pairs': [
+            {'ingress': '5311c75d-d718-4369-bbda-cdcc6da60fcc',
+             'description': '',
+             'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+             'egress': '5311c75d-d718-4369-bbda-cdcc6da60fcc',
+             'service_function_parameters': {'correlation': correlation},
+             'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+             'id': 'c121ebdd-7f2d-4213-b933-3325298a6966',
+             'name': 'osm_sfi'}]}
+
+        # call the VIM connector
+        filter_dict = {'name': 'osm_sfi', 'description': ''}
+        result = self.vimconn.get_sfi_list(filter_dict.copy())
+
+        # assert that VIM connector called OpenStack with the expected filter
+        list_port_pair.assert_called_with(**filter_dict)
+        # assert that the VIM connector successfully
+        # translated and returned the OpenStack result
+        self.assertEqual(result, [
+            {'ingress_ports': ['5311c75d-d718-4369-bbda-cdcc6da60fcc'],
+             'description': '',
+             'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+             'egress_ports': ['5311c75d-d718-4369-bbda-cdcc6da60fcc'],
+             'sfc_encap': sfc_encap,
+             'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+             'id': 'c121ebdd-7f2d-4213-b933-3325298a6966',
+             'name': 'osm_sfi'}])
+
+    @mock.patch.object(Client, 'list_sfc_port_pairs')
+    def test_get_sfi_list_with_sfc_encap(self, list_sfc_port_pairs):
+        """get_sfi_list() maps correlation 'nsh' to sfc_encap=True."""
+        self._test_get_sfi_list(list_sfc_port_pairs, 'nsh', True)
+
+    @mock.patch.object(Client, 'list_sfc_port_pairs')
+    def test_get_sfi_list_without_sfc_encap(self, list_sfc_port_pairs):
+        """get_sfi_list() maps a null correlation to sfc_encap=False."""
+        self._test_get_sfi_list(list_sfc_port_pairs, None, False)
+
+    @mock.patch.object(Client, 'list_sfc_port_pair_groups')
+    def test_get_sf_list(self, list_sfc_port_pair_groups):
+        # what OpenStack is assumed to return to the VIM connector
+        list_sfc_port_pair_groups.return_value = {'port_pair_groups': [
+            {'port_pairs': ['08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2',
+                            '0d63799c-82d6-11e7-8deb-a746bb3ae9f5'],
+             'description': '',
+             'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+             'port_pair_group_parameters': {},
+             'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+             'id': 'f4a0bde8-82d5-11e7-90e1-a72b762fa27f',
+             'name': 'osm_sf'}]}
+
+        # call the VIM connector
+        filter_dict = {'name': 'osm_sf', 'description': ''}
+        result = self.vimconn.get_sf_list(filter_dict.copy())
+
+        # assert that VIM connector called OpenStack with the expected filter
+        list_sfc_port_pair_groups.assert_called_with(**filter_dict)
+        # assert that the VIM connector successfully
+        # translated and returned the OpenStack result
+        self.assertEqual(result, [
+            {'sfis': ['08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2',
+                           '0d63799c-82d6-11e7-8deb-a746bb3ae9f5'],
+             'description': '',
+             'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+             'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+             'id': 'f4a0bde8-82d5-11e7-90e1-a72b762fa27f',
+             'name': 'osm_sf'}])
+
+    def _test_get_sfp_list(self, list_sfc_port_chains, correlation, sfc_encap):
+        """Exercise get_sfp_list(): a port chain with the given correlation is
+        expected to be translated to an SFP with the given sfc_encap value."""
+        # what OpenStack is assumed to return to the VIM connector
+        list_sfc_port_chains.return_value = {'port_chains': [
+            {'port_pair_groups': ['7d8e3bf8-82d6-11e7-a032-8ff028839d25',
+                                  '7dc9013e-82d6-11e7-a5a6-a3a8d78a5518'],
+             'flow_classifiers': ['1333c2f4-82d7-11e7-a5df-9327f33d104e',
+                                  '1387ab44-82d7-11e7-9bb0-476337183905'],
+             'description': '',
+             'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+             'chain_parameters': {'correlation': correlation},
+             'chain_id': 40,
+             'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+             'id': '821bc9be-82d7-11e7-8ce3-23a08a27ab47',
+             'name': 'osm_sfp'}]}
+
+        # call the VIM connector
+        filter_dict = {'name': 'osm_sfp', 'description': ''}
+        result = self.vimconn.get_sfp_list(filter_dict.copy())
+
+        # assert that VIM connector called OpenStack with the expected filter
+        list_sfc_port_chains.assert_called_with(**filter_dict)
+        # assert that the VIM connector successfully
+        # translated and returned the OpenStack result
+        self.assertEqual(result, [
+            {'service_functions': ['7d8e3bf8-82d6-11e7-a032-8ff028839d25',
+                                   '7dc9013e-82d6-11e7-a5a6-a3a8d78a5518'],
+             'classifications': ['1333c2f4-82d7-11e7-a5df-9327f33d104e',
+                                 '1387ab44-82d7-11e7-9bb0-476337183905'],
+             'description': '',
+             'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+             'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+             'sfc_encap': sfc_encap,
+             'spi': 40,
+             'id': '821bc9be-82d7-11e7-8ce3-23a08a27ab47',
+             'name': 'osm_sfp'}])
+
+    @mock.patch.object(Client, 'list_sfc_port_chains')
+    def test_get_sfp_list_with_sfc_encap(self, list_sfc_port_chains):
+        """A port chain with 'nsh' correlation must yield sfc_encap=True."""
+        self._test_get_sfp_list(list_sfc_port_chains, 'nsh', True)
+
+    @mock.patch.object(Client, 'list_sfc_port_chains')
+    def test_get_sfp_list_without_sfc_encap(self, list_sfc_port_chains):
+        """A port chain without correlation must yield sfc_encap=False."""
+        self._test_get_sfp_list(list_sfc_port_chains, None, False)
+
+    @mock.patch.object(Client, 'list_sfc_flow_classifiers')
+    def test_get_classification(self, list_sfc_flow_classifiers):
+        """get_classification must translate a single Neutron flow classifier.
+
+        The match fields are nested under 'definition' and the ctype is
+        fixed to 'legacy_flow_classifier'.
+        """
+        # what OpenStack is assumed to return to the VIM connector
+        list_sfc_flow_classifiers.return_value = {'flow_classifiers': [
+            {'source_port_range_min': 2000,
+             'destination_ip_prefix': '192.168.3.0/24',
+             'protocol': 'udp',
+             'description': '',
+             'ethertype': 'IPv4',
+             'l7_parameters': {},
+             'source_port_range_max': 2000,
+             'destination_port_range_min': 3000,
+             'source_ip_prefix': '192.168.2.0/24',
+             'logical_destination_port': None,
+             'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+             'destination_port_range_max': None,
+             'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+             'logical_source_port': 'aaab0ab0-1452-4636-bb3b-11dca833fa2b',
+             'id': '22198366-d4e8-4d6b-b4d2-637d5d6cbb7d',
+             'name': 'fc1'}
+        ]}
+
+        # call the VIM connector
+        result = self.vimconn.get_classification(
+            '22198366-d4e8-4d6b-b4d2-637d5d6cbb7d')
+
+        # assert that VIM connector called OpenStack with the expected filter
+        list_sfc_flow_classifiers.assert_called_with(
+            id='22198366-d4e8-4d6b-b4d2-637d5d6cbb7d')
+        # assert that VIM connector successfully returned the OpenStack result
+        self.assertEqual(result,
+                         {'id': '22198366-d4e8-4d6b-b4d2-637d5d6cbb7d',
+                          'name': 'fc1',
+                          'description': '',
+                          'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+                          'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+                          'ctype': 'legacy_flow_classifier',
+                          'definition': {
+                              'source_port_range_min': 2000,
+                              'destination_ip_prefix': '192.168.3.0/24',
+                              'protocol': 'udp',
+                              'ethertype': 'IPv4',
+                              'l7_parameters': {},
+                              'source_port_range_max': 2000,
+                              'destination_port_range_min': 3000,
+                              'source_ip_prefix': '192.168.2.0/24',
+                              'logical_destination_port': None,
+                              'destination_port_range_max': None,
+                              'logical_source_port':
+                                  'aaab0ab0-1452-4636-bb3b-11dca833fa2b'}
+                          })
+
+    @mock.patch.object(Client, 'list_sfc_flow_classifiers')
+    def test_get_classification_many_results(self, list_sfc_flow_classifiers):
+        """More than one classifier for a single id must raise a conflict."""
+        # what OpenStack is assumed to return to the VIM connector:
+        # two flow classifiers where exactly one was expected
+        list_sfc_flow_classifiers.return_value = {'flow_classifiers': [
+            {'source_port_range_min': 2000,
+             'destination_ip_prefix': '192.168.3.0/24',
+             'protocol': 'udp',
+             'description': '',
+             'ethertype': 'IPv4',
+             'l7_parameters': {},
+             'source_port_range_max': 2000,
+             'destination_port_range_min': 3000,
+             'source_ip_prefix': '192.168.2.0/24',
+             'logical_destination_port': None,
+             'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+             'destination_port_range_max': None,
+             'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+             'logical_source_port': 'aaab0ab0-1452-4636-bb3b-11dca833fa2b',
+             'id': '22198366-d4e8-4d6b-b4d2-637d5d6cbb7d',
+             'name': 'fc1'},
+            {'source_port_range_min': 1000,
+             'destination_ip_prefix': '192.168.3.0/24',
+             'protocol': 'udp',
+             'description': '',
+             'ethertype': 'IPv4',
+             'l7_parameters': {},
+             'source_port_range_max': 1000,
+             'destination_port_range_min': 3000,
+             'source_ip_prefix': '192.168.2.0/24',
+             'logical_destination_port': None,
+             'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+             'destination_port_range_max': None,
+             'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+             'logical_source_port': 'aaab0ab0-1452-4636-bb3b-11dca833fa2b',
+             'id': '3196bafc-82dd-11e7-a205-9bf6c14b0721',
+             'name': 'fc2'}
+        ]}
+
+        # call the VIM connector
+        self.assertRaises(vimconn.vimconnConflictException,
+                          self.vimconn.get_classification,
+                          '3196bafc-82dd-11e7-a205-9bf6c14b0721')
+
+        # assert the VIM connector called OpenStack with the expected filter
+        list_sfc_flow_classifiers.assert_called_with(
+            id='3196bafc-82dd-11e7-a205-9bf6c14b0721')
+
+    @mock.patch.object(Client, 'list_sfc_flow_classifiers')
+    def test_get_classification_no_results(self, list_sfc_flow_classifiers):
+        """An empty classifier list must raise vimconnNotFoundException."""
+        # what OpenStack is assumed to return to the VIM connector
+        list_sfc_flow_classifiers.return_value = {'flow_classifiers': []}
+
+        # call the VIM connector
+        self.assertRaises(vimconn.vimconnNotFoundException,
+                          self.vimconn.get_classification,
+                          '3196bafc-82dd-11e7-a205-9bf6c14b0721')
+
+        # assert the VIM connector called OpenStack with the expected filter
+        list_sfc_flow_classifiers.assert_called_with(
+            id='3196bafc-82dd-11e7-a205-9bf6c14b0721')
+
+    @mock.patch.object(Client, 'list_sfc_port_pairs')
+    def test_get_sfi(self, list_sfc_port_pairs):
+        """get_sfi must translate a single Neutron port pair to an OSM SFI.
+
+        The scalar 'ingress'/'egress' ports become single-element
+        'ingress_ports'/'egress_ports' lists and the 'nsh' correlation
+        becomes sfc_encap=True.
+        """
+        # what OpenStack is assumed to return to the VIM connector
+        list_sfc_port_pairs.return_value = {'port_pairs': [
+            {'ingress': '5311c75d-d718-4369-bbda-cdcc6da60fcc',
+             'description': '',
+             'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+             'egress': '5311c75d-d718-4369-bbda-cdcc6da60fcc',
+             'service_function_parameters': {'correlation': 'nsh'},
+             'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+             'id': 'c121ebdd-7f2d-4213-b933-3325298a6966',
+             'name': 'osm_sfi1'},
+        ]}
+
+        # call the VIM connector
+        result = self.vimconn.get_sfi('c121ebdd-7f2d-4213-b933-3325298a6966')
+
+        # assert the VIM connector called OpenStack with the expected filter
+        list_sfc_port_pairs.assert_called_with(
+            id='c121ebdd-7f2d-4213-b933-3325298a6966')
+        # assert the VIM connector successfully returned the OpenStack result
+        self.assertEqual(result,
+                         {'ingress_ports': [
+                             '5311c75d-d718-4369-bbda-cdcc6da60fcc'],
+                          'egress_ports': [
+                              '5311c75d-d718-4369-bbda-cdcc6da60fcc'],
+                          'sfc_encap': True,
+                          'description': '',
+                          'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+                          'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+                          'id': 'c121ebdd-7f2d-4213-b933-3325298a6966',
+                          'name': 'osm_sfi1'})
+
+    @mock.patch.object(Client, 'list_sfc_port_pairs')
+    def test_get_sfi_many_results(self, list_sfc_port_pairs):
+        """More than one port pair for a single id must raise a conflict."""
+        # what OpenStack is assumed to return to the VIM connector:
+        # two port pairs where exactly one was expected
+        list_sfc_port_pairs.return_value = {'port_pairs': [
+            {'ingress': '5311c75d-d718-4369-bbda-cdcc6da60fcc',
+             'description': '',
+             'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+             'egress': '5311c75d-d718-4369-bbda-cdcc6da60fcc',
+             'service_function_parameters': {'correlation': 'nsh'},
+             'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+             'id': 'c121ebdd-7f2d-4213-b933-3325298a6966',
+             'name': 'osm_sfi1'},
+            {'ingress': '5311c75d-d718-4369-bbda-cdcc6da60fcc',
+             'description': '',
+             'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+             'egress': '5311c75d-d718-4369-bbda-cdcc6da60fcc',
+             'service_function_parameters': {'correlation': 'nsh'},
+             'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+             'id': 'c0436d92-82db-11e7-8f9c-5fa535f1261f',
+             'name': 'osm_sfi2'}
+        ]}
+
+        # call the VIM connector
+        self.assertRaises(vimconn.vimconnConflictException,
+                          self.vimconn.get_sfi,
+                          'c0436d92-82db-11e7-8f9c-5fa535f1261f')
+
+        # assert that VIM connector called OpenStack with the expected filter
+        list_sfc_port_pairs.assert_called_with(
+            id='c0436d92-82db-11e7-8f9c-5fa535f1261f')
+
+    @mock.patch.object(Client, 'list_sfc_port_pairs')
+    def test_get_sfi_no_results(self, list_sfc_port_pairs):
+        """An empty port pair list must raise vimconnNotFoundException."""
+        # what OpenStack is assumed to return to the VIM connector
+        list_sfc_port_pairs.return_value = {'port_pairs': []}
+
+        # call the VIM connector
+        self.assertRaises(vimconn.vimconnNotFoundException,
+                          self.vimconn.get_sfi,
+                          'b22892fc-82d9-11e7-ae85-0fea6a3b3757')
+
+        # assert that VIM connector called OpenStack with the expected filter
+        list_sfc_port_pairs.assert_called_with(
+            id='b22892fc-82d9-11e7-ae85-0fea6a3b3757')
+
+    @mock.patch.object(Client, 'list_sfc_port_pair_groups')
+    def test_get_sf(self, list_sfc_port_pair_groups):
+        """get_sf must translate a single Neutron port pair group to an SF."""
+        # what OpenStack is assumed to return to the VIM connector
+        list_sfc_port_pair_groups.return_value = {'port_pair_groups': [
+            {'port_pairs': ['08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2'],
+             'description': '',
+             'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+             'port_pair_group_parameters': {},
+             'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+             'id': 'aabba8a6-82d9-11e7-a18a-d3c7719b742d',
+             'name': 'osm_sf1'}
+        ]}
+
+        # call the VIM connector
+        result = self.vimconn.get_sf('b22892fc-82d9-11e7-ae85-0fea6a3b3757')
+
+        # assert that VIM connector called OpenStack with the expected filter
+        list_sfc_port_pair_groups.assert_called_with(
+            id='b22892fc-82d9-11e7-ae85-0fea6a3b3757')
+        # assert that VIM connector successfully returned the OpenStack result
+        # ('port_pairs' renamed to 'sfis', group parameters dropped)
+        self.assertEqual(result,
+                         {'description': '',
+                          'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+                          'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+                          'sfis': ['08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2'],
+                          'id': 'aabba8a6-82d9-11e7-a18a-d3c7719b742d',
+                          'name': 'osm_sf1'})
+
+    @mock.patch.object(Client, 'list_sfc_port_pair_groups')
+    def test_get_sf_many_results(self, list_sfc_port_pair_groups):
+        """More than one group for a single id must raise a conflict."""
+        # what OpenStack is assumed to return to the VIM connector:
+        # two port pair groups where exactly one was expected
+        list_sfc_port_pair_groups.return_value = {'port_pair_groups': [
+            {'port_pairs': ['08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2'],
+             'description': '',
+             'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+             'port_pair_group_parameters': {},
+             'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+             'id': 'aabba8a6-82d9-11e7-a18a-d3c7719b742d',
+             'name': 'osm_sf1'},
+            {'port_pairs': ['0d63799c-82d6-11e7-8deb-a746bb3ae9f5'],
+             'description': '',
+             'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+             'port_pair_group_parameters': {},
+             'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+             'id': 'b22892fc-82d9-11e7-ae85-0fea6a3b3757',
+             'name': 'osm_sf2'}
+        ]}
+
+        # call the VIM connector
+        self.assertRaises(vimconn.vimconnConflictException,
+                          self.vimconn.get_sf,
+                          'b22892fc-82d9-11e7-ae85-0fea6a3b3757')
+
+        # assert that VIM connector called OpenStack with the expected filter
+        list_sfc_port_pair_groups.assert_called_with(
+            id='b22892fc-82d9-11e7-ae85-0fea6a3b3757')
+
+    @mock.patch.object(Client, 'list_sfc_port_pair_groups')
+    def test_get_sf_no_results(self, list_sfc_port_pair_groups):
+        """An empty group list must raise vimconnNotFoundException."""
+        # what OpenStack is assumed to return to the VIM connector
+        list_sfc_port_pair_groups.return_value = {'port_pair_groups': []}
+
+        # call the VIM connector
+        self.assertRaises(vimconn.vimconnNotFoundException,
+                          self.vimconn.get_sf,
+                          'b22892fc-82d9-11e7-ae85-0fea6a3b3757')
+
+        # assert that VIM connector called OpenStack with the expected filter
+        list_sfc_port_pair_groups.assert_called_with(
+            id='b22892fc-82d9-11e7-ae85-0fea6a3b3757')
+
+    @mock.patch.object(Client, 'list_sfc_port_chains')
+    def test_get_sfp(self, list_sfc_port_chains):
+        """get_sfp must translate a single Neutron port chain to an SFP."""
+        # what OpenStack is assumed to return to the VIM connector
+        list_sfc_port_chains.return_value = {'port_chains': [
+            {'port_pair_groups': ['7d8e3bf8-82d6-11e7-a032-8ff028839d25'],
+             'flow_classifiers': ['1333c2f4-82d7-11e7-a5df-9327f33d104e'],
+             'description': '',
+             'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+             'chain_parameters': {'correlation': 'nsh'},
+             'chain_id': 40,
+             'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+             'id': '821bc9be-82d7-11e7-8ce3-23a08a27ab47',
+             'name': 'osm_sfp1'}]}
+
+        # call the VIM connector
+        result = self.vimconn.get_sfp('821bc9be-82d7-11e7-8ce3-23a08a27ab47')
+
+        # assert that VIM connector called OpenStack with the expected filter
+        list_sfc_port_chains.assert_called_with(
+            id='821bc9be-82d7-11e7-8ce3-23a08a27ab47')
+        # assert that VIM connector successfully returned the OpenStack result
+        # (port_pair_groups -> service_functions, flow_classifiers ->
+        # classifications, chain_id -> spi, 'nsh' -> sfc_encap=True)
+        self.assertEqual(result,
+                         {'service_functions': [
+                             '7d8e3bf8-82d6-11e7-a032-8ff028839d25'],
+                          'classifications': [
+                              '1333c2f4-82d7-11e7-a5df-9327f33d104e'],
+                          'description': '',
+                          'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+                          'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+                          'sfc_encap': True,
+                          'spi': 40,
+                          'id': '821bc9be-82d7-11e7-8ce3-23a08a27ab47',
+                          'name': 'osm_sfp1'})
+
+    @mock.patch.object(Client, 'list_sfc_port_chains')
+    def test_get_sfp_many_results(self, list_sfc_port_chains):
+        """More than one chain for a single id must raise a conflict."""
+        # what OpenStack is assumed to return to the VIM connector:
+        # two port chains where exactly one was expected
+        list_sfc_port_chains.return_value = {'port_chains': [
+            {'port_pair_groups': ['7d8e3bf8-82d6-11e7-a032-8ff028839d25'],
+             'flow_classifiers': ['1333c2f4-82d7-11e7-a5df-9327f33d104e'],
+             'description': '',
+             'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+             'chain_parameters': {'correlation': 'nsh'},
+             'chain_id': 40,
+             'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+             'id': '821bc9be-82d7-11e7-8ce3-23a08a27ab47',
+             'name': 'osm_sfp1'},
+            {'port_pair_groups': ['7d8e3bf8-82d6-11e7-a032-8ff028839d25'],
+             'flow_classifiers': ['1333c2f4-82d7-11e7-a5df-9327f33d104e'],
+             'description': '',
+             'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+             'chain_parameters': {'correlation': 'nsh'},
+             'chain_id': 50,
+             'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
+             'id': '5d002f38-82de-11e7-a770-f303f11ce66a',
+             'name': 'osm_sfp2'}
+        ]}
+
+        # call the VIM connector
+        self.assertRaises(vimconn.vimconnConflictException,
+                          self.vimconn.get_sfp,
+                          '5d002f38-82de-11e7-a770-f303f11ce66a')
+
+        # assert that VIM connector called OpenStack with the expected filter
+        list_sfc_port_chains.assert_called_with(
+            id='5d002f38-82de-11e7-a770-f303f11ce66a')
+
+    @mock.patch.object(Client, 'list_sfc_port_chains')
+    def test_get_sfp_no_results(self, list_sfc_port_chains):
+        """An empty chain list must raise vimconnNotFoundException."""
+        # what OpenStack is assumed to return to the VIM connector
+        list_sfc_port_chains.return_value = {'port_chains': []}
+
+        # call the VIM connector
+        self.assertRaises(vimconn.vimconnNotFoundException,
+                          self.vimconn.get_sfp,
+                          '5d002f38-82de-11e7-a770-f303f11ce66a')
+
+        # assert that VIM connector called OpenStack with the expected filter
+        list_sfc_port_chains.assert_called_with(
+            id='5d002f38-82de-11e7-a770-f303f11ce66a')
+
+    @mock.patch.object(Client, 'delete_sfc_flow_classifier')
+    def test_delete_classification(self, delete_sfc_flow_classifier):
+        """delete_classification must delegate to Neutron and echo the id."""
+        result = self.vimconn.delete_classification(
+            '638f957c-82df-11e7-b7c8-132706021464')
+        delete_sfc_flow_classifier.assert_called_with(
+            '638f957c-82df-11e7-b7c8-132706021464')
+        self.assertEqual(result, '638f957c-82df-11e7-b7c8-132706021464')
+
+    @mock.patch.object(Client, 'delete_sfc_port_pair')
+    def test_delete_sfi(self, delete_sfc_port_pair):
+        """delete_sfi must delegate to Neutron and echo the deleted id."""
+        result = self.vimconn.delete_sfi(
+            '638f957c-82df-11e7-b7c8-132706021464')
+        delete_sfc_port_pair.assert_called_with(
+            '638f957c-82df-11e7-b7c8-132706021464')
+        self.assertEqual(result, '638f957c-82df-11e7-b7c8-132706021464')
+
+    @mock.patch.object(Client, 'delete_sfc_port_pair_group')
+    def test_delete_sf(self, delete_sfc_port_pair_group):
+        """delete_sf must delegate to Neutron and echo the deleted id."""
+        result = self.vimconn.delete_sf('638f957c-82df-11e7-b7c8-132706021464')
+        delete_sfc_port_pair_group.assert_called_with(
+            '638f957c-82df-11e7-b7c8-132706021464')
+        self.assertEqual(result, '638f957c-82df-11e7-b7c8-132706021464')
+
+    @mock.patch.object(Client, 'delete_sfc_port_chain')
+    def test_delete_sfp(self, delete_sfc_port_chain):
+        """delete_sfp must delegate to Neutron and echo the deleted id."""
+        result = self.vimconn.delete_sfp(
+            '638f957c-82df-11e7-b7c8-132706021464')
+        delete_sfc_port_chain.assert_called_with(
+            '638f957c-82df-11e7-b7c8-132706021464')
+        self.assertEqual(result, '638f957c-82df-11e7-b7c8-132706021464')
+
+
+# allow running this test module directly with the unittest runner
+if __name__ == '__main__':
+    unittest.main()
diff --git a/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py b/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py
new file mode 100644 (file)
index 0000000..1c2b072
--- /dev/null
@@ -0,0 +1,2223 @@
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+
+'''
+osconnector implements all the methods to interact with openstack using the python-neutronclient.
+
+For the VNF forwarding graph, The OpenStack VIM connector calls the
+networking-sfc Neutron extension methods, whose resources are mapped
+to the VIM connector's SFC resources as follows:
+- Classification (OSM) -> Flow Classifier (Neutron)
+- Service Function Instance (OSM) -> Port Pair (Neutron)
+- Service Function (OSM) -> Port Pair Group (Neutron)
+- Service Function Path (OSM) -> Port Chain (Neutron)
+'''
+__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
+__date__  = "$22-sep-2017 23:59:59$"
+
+from osm_ro import vimconn
+# import json
+import logging
+import netaddr
+import time
+import yaml
+import random
+import re
+import copy
+from pprint import pformat
+
+from novaclient import client as nClient, exceptions as nvExceptions
+from keystoneauth1.identity import v2, v3
+from keystoneauth1 import session
+import keystoneclient.exceptions as ksExceptions
+import keystoneclient.v3.client as ksClient_v3
+import keystoneclient.v2_0.client as ksClient_v2
+from glanceclient import client as glClient
+import glanceclient.exc as gl1Exceptions
+from  cinderclient import client as cClient
+from http.client  import HTTPException   # TODO py3 check that this base exception matches python2 httplib.HTTPException
+from neutronclient.neutron import client as neClient
+from neutronclient.common import exceptions as neExceptions
+from requests.exceptions import ConnectionError
+
+
+"""contain the openstack virtual machine status to openmano status"""
+# map of Nova server states to openmano (MANO) VM states
+vmStatus2manoFormat={'ACTIVE':'ACTIVE',
+                     'PAUSED':'PAUSED',
+                     'SUSPENDED': 'SUSPENDED',
+                     'SHUTOFF':'INACTIVE',
+                     'BUILD':'BUILD',
+                     'ERROR':'ERROR','DELETED':'DELETED'
+                     }
+# map of Neutron network states to openmano (MANO) network states
+netStatus2manoFormat={'ACTIVE':'ACTIVE','PAUSED':'PAUSED','INACTIVE':'INACTIVE','BUILD':'BUILD','ERROR':'ERROR','DELETED':'DELETED'
+                     }
+
+# the only classification type accepted by new_classification()
+supportedClassificationTypes = ['legacy_flow_classifier']
+
+#global var to have a timeout creating and deleting volumes
+volume_timeout = 600   # seconds
+server_timeout = 600   # seconds
+
+
+class SafeDumper(yaml.SafeDumper):
+    """YAML safe dumper that tolerates OpenStack's dict subclasses."""
+
+    def represent_data(self, data):
+        # Openstack APIs use custom subclasses of dict and YAML safe dumper
+        # is designed to not handle that (reference issue 142 of pyyaml)
+        if isinstance(data, dict) and data.__class__ != dict:
+            # A simple solution is to convert those items back to dicts
+            data = dict(data.items())
+
+        return super(SafeDumper, self).represent_data(data)
+
+
+class vimconnector(vimconn.vimconnector):
+    # NOTE(review): config={} and persistent_info={} are mutable default
+    # arguments; here they are only read/stored, but the dicts are shared
+    # across calls that omit them — consider None sentinels.
+    def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None,
+                 log_level=None, config={}, persistent_info={}):
+        '''using common constructor parameters. In this case
+        'url' is the keystone authorization url,
+        'url_admin' is not use
+
+        Validates config options (APIversion, vim_type, vlan ranges,
+        TLS settings), then caches the per-VIM openstack clients from
+        'persistent_info' so they can be reused between invocations.
+        Raises vimconnException on invalid config values.
+        '''
+        api_version = config.get('APIversion')
+        if api_version and api_version not in ('v3.3', 'v2.0', '2', '3'):
+            raise vimconn.vimconnException("Invalid value '{}' for config:APIversion. "
+                                           "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version))
+        vim_type = config.get('vim_type')
+        if vim_type and vim_type not in ('vio', 'VIO'):
+            raise vimconn.vimconnException("Invalid value '{}' for config:vim_type."
+                            "Allowed values are 'vio' or 'VIO'".format(vim_type))
+
+        if config.get('dataplane_net_vlan_range') is not None:
+            #validate vlan ranges provided by user
+            self._validate_vlan_ranges(config.get('dataplane_net_vlan_range'), 'dataplane_net_vlan_range')
+
+        if config.get('multisegment_vlan_range') is not None:
+            #validate vlan ranges provided by user
+            self._validate_vlan_ranges(config.get('multisegment_vlan_range'), 'multisegment_vlan_range')
+
+        vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level,
+                                      config)
+
+        # TLS verification: 'insecure' disables it, 'ca_cert' points requests
+        # at a CA bundle; both at once is contradictory, so reject it
+        if self.config.get("insecure") and self.config.get("ca_cert"):
+            raise vimconn.vimconnException("options insecure and ca_cert are mutually exclusive")
+        self.verify = True
+        if self.config.get("insecure"):
+            self.verify = False
+        if self.config.get("ca_cert"):
+            self.verify = self.config.get("ca_cert")
+
+        if not url:
+            raise TypeError('url param can not be NoneType')
+        self.persistent_info = persistent_info
+        self.availability_zone = persistent_info.get('availability_zone', None)
+        # 'session' caches the authenticated clients; 'reload_client' forces
+        # _reload_connection() to rebuild them on the next operation
+        self.session = persistent_info.get('session', {'reload_client': True})
+        self.my_tenant_id = self.session.get('my_tenant_id')
+        self.nova = self.session.get('nova')
+        self.neutron = self.session.get('neutron')
+        self.cinder = self.session.get('cinder')
+        self.glance = self.session.get('glance')
+        # self.glancev1 = self.session.get('glancev1')
+        self.keystone = self.session.get('keystone')
+        self.api_version3 = self.session.get('api_version3')
+        self.vim_type = self.config.get("vim_type")
+        if self.vim_type:
+            self.vim_type = self.vim_type.upper()
+        if self.config.get("use_internal_endpoint"):
+            self.endpoint_type = "internalURL"
+        else:
+            self.endpoint_type = None
+
+        self.logger = logging.getLogger('openmano.vim.openstack')
+
+        # allow security_groups to be a list or a single string
+        if isinstance(self.config.get('security_groups'), str):
+            self.config['security_groups'] = [self.config['security_groups']]
+        self.security_groups_id = None
+
+        ####### VIO Specific Changes #########
+        if self.vim_type == "VIO":
+            # VMware Integrated OpenStack gets its own logger name
+            self.logger = logging.getLogger('openmano.vim.vio')
+
+        if log_level:
+            self.logger.setLevel( getattr(logging, log_level))
+
+    def __getitem__(self, index):
+        """Get individuals parameters.
+
+        The domain ids live in self.config rather than as attributes, so
+        they are special-cased; everything else defers to the base class.
+        Throw KeyError"""
+        if index == 'project_domain_id':
+            return self.config.get("project_domain_id")
+        elif index == 'user_domain_id':
+            return self.config.get("user_domain_id")
+        else:
+            return vimconn.vimconnector.__getitem__(self, index)
+
+    def __setitem__(self, index, value):
+        """Set individuals parameters and it is marked as dirty so to force connection reload.
+
+        Any parameter change invalidates the cached openstack clients;
+        'reload_client' makes _reload_connection() rebuild them lazily.
+        Throw KeyError"""
+        if index == 'project_domain_id':
+            self.config["project_domain_id"] = value
+        elif index == 'user_domain_id':
+                self.config["user_domain_id"] = value
+        else:
+            vimconn.vimconnector.__setitem__(self, index, value)
+        self.session['reload_client'] = True
+
+    def serialize(self, value):
+        """Serialization of python basic types.
+
+        In the case value is not serializable a message will be logged and a
+        simple representation of the data that cannot be converted back to
+        python is returned.
+        """
+        # strings pass through untouched; everything else is YAML-dumped
+        if isinstance(value, str):
+            return value
+
+        try:
+            # module-level SafeDumper converts OpenStack dict subclasses
+            # back to plain dicts so yaml.dump does not choke on them
+            return yaml.dump(value, Dumper=SafeDumper,
+                             default_flow_style=True, width=256)
+        except yaml.representer.RepresenterError:
+                self.logger.debug('The following entity cannot be serialized in YAML:\n\n%s\n\n', pformat(value),
+                                  exc_info=True)
+                return str(value)
+
+    def _reload_connection(self):
+        '''Called before any operation, it check if credentials has changed
+        Throw keystoneclient.apiclient.exceptions.AuthorizationFailure
+        '''
+        #TODO control the timing and possible token timeout, but it seams that python client does this task for us :-)
+        if self.session['reload_client']:
+            if self.config.get('APIversion'):
+                self.api_version3 = self.config['APIversion'] == 'v3.3' or self.config['APIversion'] == '3'
+            else:  # get from ending auth_url that end with v3 or with v2.0
+                self.api_version3 =  self.url.endswith("/v3") or self.url.endswith("/v3/")
+            self.session['api_version3'] = self.api_version3
+            if self.api_version3:
+                if self.config.get('project_domain_id') or self.config.get('project_domain_name'):
+                    project_domain_id_default = None
+                else:
+                    project_domain_id_default = 'default'
+                if self.config.get('user_domain_id') or self.config.get('user_domain_name'):
+                    user_domain_id_default = None
+                else:
+                    user_domain_id_default = 'default'
+                auth = v3.Password(auth_url=self.url,
+                                   username=self.user,
+                                   password=self.passwd,
+                                   project_name=self.tenant_name,
+                                   project_id=self.tenant_id,
+                                   project_domain_id=self.config.get('project_domain_id', project_domain_id_default),
+                                   user_domain_id=self.config.get('user_domain_id', user_domain_id_default),
+                                   project_domain_name=self.config.get('project_domain_name'),
+                                   user_domain_name=self.config.get('user_domain_name'))
+            else:
+                auth = v2.Password(auth_url=self.url,
+                                   username=self.user,
+                                   password=self.passwd,
+                                   tenant_name=self.tenant_name,
+                                   tenant_id=self.tenant_id)
+            sess = session.Session(auth=auth, verify=self.verify)
+            # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River Titanium cloud and StarlingX
+            region_name = self.config.get('region_name')
+            if self.api_version3:
+                self.keystone = ksClient_v3.Client(session=sess, endpoint_type=self.endpoint_type, region_name=region_name)
+            else:
+                self.keystone = ksClient_v2.Client(session=sess, endpoint_type=self.endpoint_type)
+            self.session['keystone'] = self.keystone
+            # In order to enable microversion functionality an explicit microversion must be specified in 'config'.
+            # This implementation approach is due to the warning message in
+            # https://developer.openstack.org/api-guide/compute/microversions.html
+            # where it is stated that microversion backwards compatibility is not guaranteed and clients should
+            # always require an specific microversion.
+            # To be able to use 'device role tagging' functionality define 'microversion: 2.32' in datacenter config
+            version = self.config.get("microversion")
+            if not version:
+                version = "2.1"
+            # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River Titanium cloud and StarlingX
+            self.nova = self.session['nova'] = nClient.Client(str(version), session=sess, endpoint_type=self.endpoint_type, region_name=region_name)
+            self.neutron = self.session['neutron'] = neClient.Client('2.0', session=sess, endpoint_type=self.endpoint_type, region_name=region_name)
+            self.cinder = self.session['cinder'] = cClient.Client(2, session=sess, endpoint_type=self.endpoint_type, region_name=region_name)
+            try:
+                self.my_tenant_id = self.session['my_tenant_id'] = sess.get_project_id()
+            except Exception as e:
+                self.logger.error("Cannot get project_id from session", exc_info=True)
+            if self.endpoint_type == "internalURL":
+                glance_service_id = self.keystone.services.list(name="glance")[0].id
+                glance_endpoint = self.keystone.endpoints.list(glance_service_id, interface="internal")[0].url
+            else:
+                glance_endpoint = None
+            self.glance = self.session['glance'] = glClient.Client(2, session=sess, endpoint=glance_endpoint)
+            # using version 1 of glance client in new_image()
+            # self.glancev1 = self.session['glancev1'] = glClient.Client('1', session=sess,
+            #                                                            endpoint=glance_endpoint)
+            self.session['reload_client'] = False
+            self.persistent_info['session'] = self.session
+            # add availablity zone info inside  self.persistent_info
+            self._set_availablity_zones()
+            self.persistent_info['availability_zone'] = self.availability_zone
+            self.security_groups_id = None  # force to get again security_groups_ids next time they are needed
+
+    def __net_os2mano(self, net_list_dict):
+        '''Transform the net openstack format to mano format
+        net_list_dict can be a list of dict or a single dict.
+        Mutates the given dict(s) in place adding a 'type' key; returns None'''
+        if type(net_list_dict) is dict:
+            net_list_=(net_list_dict,)
+        elif type(net_list_dict) is list:
+            net_list_=net_list_dict
+        else:
+            raise TypeError("param net_list_dict must be a list or a dictionary")
+        for net in net_list_:
+            # vlan provider networks are dataplane nets ('data'); anything else is overlay ('bridge')
+            if net.get('provider:network_type') == "vlan":
+                net['type']='data'
+            else:
+                net['type']='bridge'
+
+    def __classification_os2mano(self, class_list_dict):
+        """Transform the openstack format (Flow Classifier) to mano format
+        (Classification) class_list_dict can be a list of dict or a single dict.
+        Mutates the given dict(s) in place; returns None
+        """
+        if isinstance(class_list_dict, dict):
+            class_list_ = [class_list_dict]
+        elif isinstance(class_list_dict, list):
+            class_list_ = class_list_dict
+        else:
+            raise TypeError(
+                "param class_list_dict must be a list or a dictionary")
+        for classification in class_list_:
+            # keep the identity fields aside, then nest every remaining
+            # openstack attribute under 'definition' and rebuild the dict
+            # in the mano Classification shape
+            id = classification.pop('id')
+            name = classification.pop('name')
+            description = classification.pop('description')
+            project_id = classification.pop('project_id')
+            tenant_id = classification.pop('tenant_id')
+            original_classification = copy.deepcopy(classification)
+            classification.clear()
+            classification['ctype'] = 'legacy_flow_classifier'
+            classification['definition'] = original_classification
+            classification['id'] = id
+            classification['name'] = name
+            classification['description'] = description
+            classification['project_id'] = project_id
+            classification['tenant_id'] = tenant_id
+
+    def __sfi_os2mano(self, sfi_list_dict):
+        """Transform the openstack format (Port Pair) to mano format (SFI)
+        sfi_list_dict can be a list of dict or a single dict.
+        Mutates the given dict(s) in place; returns None
+        """
+        if isinstance(sfi_list_dict, dict):
+            sfi_list_ = [sfi_list_dict]
+        elif isinstance(sfi_list_dict, list):
+            sfi_list_ = sfi_list_dict
+        else:
+            raise TypeError(
+                "param sfi_list_dict must be a list or a dictionary")
+        for sfi in sfi_list_:
+            # a port pair has at most one ingress and one egress port;
+            # mano SFIs carry them as (possibly empty) lists instead
+            sfi['ingress_ports'] = []
+            sfi['egress_ports'] = []
+            if sfi.get('ingress'):
+                sfi['ingress_ports'].append(sfi['ingress'])
+            if sfi.get('egress'):
+                sfi['egress_ports'].append(sfi['egress'])
+            del sfi['ingress']
+            del sfi['egress']
+            # sfc_encap is True only when a 'correlation' parameter is present
+            params = sfi.get('service_function_parameters')
+            sfc_encap = False
+            if params:
+                correlation = params.get('correlation')
+                if correlation:
+                    sfc_encap = True
+            sfi['sfc_encap'] = sfc_encap
+            del sfi['service_function_parameters']
+
+    def __sf_os2mano(self, sf_list_dict):
+        """Transform the openstack format (Port Pair Group) to mano format (SF)
+        sf_list_dict can be a list of dict or a single dict.
+        Mutates the given dict(s) in place; returns None
+        """
+        if isinstance(sf_list_dict, dict):
+            sf_list_ = [sf_list_dict]
+        elif isinstance(sf_list_dict, list):
+            sf_list_ = sf_list_dict
+        else:
+            raise TypeError(
+                "param sf_list_dict must be a list or a dictionary")
+        for sf in sf_list_:
+            # drop the group parameters and rename 'port_pairs' to 'sfis'
+            del sf['port_pair_group_parameters']
+            sf['sfis'] = sf['port_pairs']
+            del sf['port_pairs']
+
+    def __sfp_os2mano(self, sfp_list_dict):
+        """Transform the openstack format (Port Chain) to mano format (SFP)
+        sfp_list_dict can be a list of dict or a single dict.
+        Mutates the given dict(s) in place; returns None
+        """
+        if isinstance(sfp_list_dict, dict):
+            sfp_list_ = [sfp_list_dict]
+        elif isinstance(sfp_list_dict, list):
+            sfp_list_ = sfp_list_dict
+        else:
+            raise TypeError(
+                "param sfp_list_dict must be a list or a dictionary")
+        for sfp in sfp_list_:
+            # sfc_encap is True only when a 'correlation' chain parameter is present
+            params = sfp.pop('chain_parameters')
+            sfc_encap = False
+            if params:
+                correlation = params.get('correlation')
+                if correlation:
+                    sfc_encap = True
+            sfp['sfc_encap'] = sfc_encap
+            # rename openstack keys to their mano counterparts
+            sfp['spi'] = sfp.pop('chain_id')
+            sfp['classifications'] = sfp.pop('flow_classifiers')
+            sfp['service_functions'] = sfp.pop('port_pair_groups')
+
+    # placeholder for now; read TODO note below
+    def _validate_classification(self, type, definition):
+        # only legacy_flow_classifier Type is supported at this point,
+        # so every classification is accepted unconditionally
+        return True
+        # TODO(igordcard): this method should be an abstract method of an
+        # abstract Classification class to be implemented by the specific
+        # Types. Also, abstract vimconnector should call the validation
+        # method before the implemented VIM connectors are called.
+
+    def _format_exception(self, exception):
+        '''Transform a keystone, nova, neutron  exception into a vimconn exception.
+        Always raises (never returns): the original exception is mapped to the
+        closest vimconn exception class with the original type name prepended
+        to the message.
+        '''
+
+        # NOTE(review): BaseException.message was removed in Python 3; some of the
+        # client exception classes may not define '.message', which would raise
+        # AttributeError here — consider str(exception). TODO confirm with the
+        # keystone/nova/neutron/glance client versions in use.
+        message_error = exception.message
+
+        if isinstance(exception, (neExceptions.NetworkNotFoundClient, nvExceptions.NotFound, ksExceptions.NotFound,
+                                  gl1Exceptions.HTTPNotFound)):
+            raise vimconn.vimconnNotFoundException(type(exception).__name__ + ": " + message_error)
+        elif isinstance(exception, (HTTPException, gl1Exceptions.HTTPException, gl1Exceptions.CommunicationError,
+                               ConnectionError, ksExceptions.ConnectionError, neExceptions.ConnectionFailed)):
+            raise vimconn.vimconnConnectionException(type(exception).__name__ + ": " + message_error)
+        elif isinstance(exception,  (KeyError, nvExceptions.BadRequest, ksExceptions.BadRequest)):
+            raise vimconn.vimconnException(type(exception).__name__ + ": " + message_error)
+        elif isinstance(exception, (nvExceptions.ClientException, ksExceptions.ClientException,
+                                    neExceptions.NeutronException)):
+            raise vimconn.vimconnUnexpectedResponse(type(exception).__name__ + ": " + message_error)
+        elif isinstance(exception, nvExceptions.Conflict):
+            raise vimconn.vimconnConflictException(type(exception).__name__ + ": " + message_error)
+        elif isinstance(exception, vimconn.vimconnException):
+            # already a vimconn exception: re-raise untouched
+            raise exception
+        else:  # ()
+            # unknown exception type: log with traceback and map to a connection error
+            self.logger.error("General Exception " + message_error, exc_info=True)
+            raise vimconn.vimconnConnectionException(type(exception).__name__ + ": " + message_error)
+
+    def _get_ids_from_name(self):
+        """
+        Obtain ids from the names of tenant and security_groups; store them at
+        self.my_tenant_id (set by _reload_connection) and self.security_groups_id
+        :return: None
+        :raises vimconn.vimconnConnectionException: if the tenant or any configured
+            security group cannot be resolved
+        """
+        # get tenant_id if only tenant_name is supplied
+        self._reload_connection()
+        if not self.my_tenant_id:
+            raise vimconn.vimconnConnectionException("Error getting tenant information from name={} id={}".
+                                                     format(self.tenant_name, self.tenant_id))
+        if self.config.get('security_groups') and not self.security_groups_id:
+            # convert from name to id
+            neutron_sg_list = self.neutron.list_security_groups(tenant_id=self.my_tenant_id)["security_groups"]
+
+            self.security_groups_id = []
+            for sg in self.config.get('security_groups'):
+                # the config entry may hold either the group id or its name
+                for neutron_sg in neutron_sg_list:
+                    if sg in (neutron_sg["id"], neutron_sg["name"]):
+                        self.security_groups_id.append(neutron_sg["id"])
+                        break
+                else:
+                    # for/else: no neutron group matched; clear the cache so a
+                    # later call retries the resolution, then fail
+                    self.security_groups_id = None
+                    raise vimconn.vimconnConnectionException("Not found security group {} for this tenant".format(sg))
+
+    def check_vim_connectivity(self):
+        """Check VIM reachability and credentials; raises a vimconn exception on failure"""
+        # just get network list to check connectivity and credentials
+        self.get_network_list(filter_dict={})
+
+    def get_tenant_list(self, filter_dict={}):
+        '''Obtain tenants of VIM
+        filter_dict can contain the following keys:
+            name: filter by tenant name
+            id: filter by tenant uuid/id
+            <other VIM specific>
+        Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
+        '''
+        # NOTE(review): mutable default argument; harmless while the dict is only
+        # read, but worth replacing with None at some point
+        self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))
+        try:
+            self._reload_connection()
+            if self.api_version3:
+                # keystone v3: only 'name' is filtered server-side
+                project_class_list = self.keystone.projects.list(name=filter_dict.get("name"))
+            else:
+                project_class_list = self.keystone.tenants.findall(**filter_dict)
+            project_list=[]
+            for project in project_class_list:
+                # 'id' filtering is applied client-side for both API versions
+                if filter_dict.get('id') and filter_dict["id"] != project.id:
+                    continue
+                project_list.append(project.to_dict())
+            return project_list
+        except (ksExceptions.ConnectionError, ksExceptions.ClientException, ConnectionError) as e:
+            self._format_exception(e)
+
+    def new_tenant(self, tenant_name, tenant_description):
+        '''Adds a new tenant to openstack VIM. Returns the tenant identifier'''
+        self.logger.debug("Adding a new tenant name: %s", tenant_name)
+        try:
+            self._reload_connection()
+            if self.api_version3:
+                # keystone v3 uses 'projects' and needs the domain; fall back to 'default'
+                project = self.keystone.projects.create(tenant_name, self.config.get("project_domain_id", "default"),
+                                                        description=tenant_description, is_domain=False)
+            else:
+                project = self.keystone.tenants.create(tenant_name, tenant_description)
+            return project.id
+        except (ksExceptions.ConnectionError, ksExceptions.ClientException, ksExceptions.BadRequest, ConnectionError)  as e:
+            self._format_exception(e)
+
+    def delete_tenant(self, tenant_id):
+        '''Delete a tenant from openstack VIM. Returns the old tenant identifier'''
+        self.logger.debug("Deleting tenant %s from VIM", tenant_id)
+        try:
+            self._reload_connection()
+            # keystone v3 exposes 'projects', v2 exposes 'tenants'
+            if self.api_version3:
+                self.keystone.projects.delete(tenant_id)
+            else:
+                self.keystone.tenants.delete(tenant_id)
+            return tenant_id
+        except (ksExceptions.ConnectionError, ksExceptions.ClientException, ksExceptions.NotFound, ConnectionError)  as e:
+            self._format_exception(e)
+
+    def new_network(self,net_name, net_type, ip_profile=None, shared=False, vlan=None):
+        """Adds a tenant network to VIM
+        Params:
+            'net_name': name of the network
+            'net_type': one of:
+                'bridge': overlay isolated network
+                'data':   underlay E-LAN network for Passthrough and SRIOV interfaces
+                'ptp':    underlay E-LINE network for Passthrough and SRIOV interfaces.
+            'ip_profile': is a dict containing the IP parameters of the network
+                'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
+                'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
+                'gateway_address': (Optional) ip_schema, that is X.X.X.X
+                'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
+                'dhcp_enabled': True or False
+                'dhcp_start_address': ip_schema, first IP to grant
+                'dhcp_count': number of IPs to grant.
+            'shared': if this network can be seen/use by other tenants/organization
+            'vlan': in case of a data or ptp net_type, the intended vlan tag to be used for the network
+        Returns a tuple with the network identifier and created_items, or raises an exception on error
+            created_items can be None or a dictionary where this method can include key-values that will be passed to
+            the method delete_network. Can be used to store created segments, created l2gw connections, etc.
+            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+            as not present.
+        """
+        self.logger.debug("Adding a new network to VIM name '%s', type '%s'", net_name, net_type)
+        # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))
+        try:
+            new_net = None
+            created_items = {}
+            self._reload_connection()
+            network_dict = {'name': net_name, 'admin_state_up': True}
+            if net_type=="data" or net_type=="ptp":
+                # dataplane networks need a provider physical network from config
+                if self.config.get('dataplane_physical_net') == None:
+                    raise vimconn.vimconnConflictException("You must provide a 'dataplane_physical_net' at config value before creating sriov network")
+                if not self.config.get('multisegment_support'):
+                    network_dict["provider:physical_network"] = self.config[
+                        'dataplane_physical_net']  # "physnet_sriov" #TODO physical
+                    network_dict["provider:network_type"] = "vlan"
+                    # NOTE(review): this overwrites 'provider:network_type' with the
+                    # vlan tag value itself; presumably 'provider:segmentation_id'
+                    # was intended — verify against neutron API usage
+                    if vlan!=None:
+                        network_dict["provider:network_type"] = vlan
+                else:
+                    ###### Multi-segment case ######
+                    # one vxlan overlay segment plus one vlan dataplane segment
+                    segment_list = []
+                    segment1_dict = {}
+                    segment1_dict["provider:physical_network"] = ''
+                    segment1_dict["provider:network_type"]     = 'vxlan'
+                    segment_list.append(segment1_dict)
+                    segment2_dict = {}
+                    segment2_dict["provider:physical_network"] = self.config['dataplane_physical_net']
+                    segment2_dict["provider:network_type"]     = "vlan"
+                    if self.config.get('multisegment_vlan_range'):
+                        vlanID = self._generate_multisegment_vlanID()
+                        segment2_dict["provider:segmentation_id"] = vlanID
+                    # else
+                    #     raise vimconn.vimconnConflictException(
+                    #         "You must provide 'multisegment_vlan_range' at config dict before creating a multisegment network")
+                    segment_list.append(segment2_dict)
+                    network_dict["segments"] = segment_list
+
+                ####### VIO Specific Changes #########
+                # VMware Integrated Openstack requires an explicit segmentation id
+                if self.vim_type == "VIO":
+                    if vlan is not None:
+                        network_dict["provider:segmentation_id"] = vlan
+                    else:
+                        if self.config.get('dataplane_net_vlan_range') is None:
+                            raise vimconn.vimconnConflictException("You must provide "\
+                                "'dataplane_net_vlan_range' in format [start_ID - end_ID]"\
+                                "at config value before creating sriov network with vlan tag")
+
+                        network_dict["provider:segmentation_id"] = self._generate_vlanID()
+
+            network_dict["shared"] = shared
+            if self.config.get("disable_network_port_security"):
+                network_dict["port_security_enabled"] = False
+            new_net = self.neutron.create_network({'network':network_dict})
+            # print new_net
+            # create subnetwork, even if there is no profile
+            if not ip_profile:
+                ip_profile = {}
+            if not ip_profile.get('subnet_address'):
+                #Fake subnet is required
+                subnet_rand = random.randint(0, 255)
+                ip_profile['subnet_address'] = "192.168.{}.0/24".format(subnet_rand)
+            if 'ip_version' not in ip_profile:
+                ip_profile['ip_version'] = "IPv4"
+            subnet = {"name": net_name+"-subnet",
+                    "network_id": new_net["network"]["id"],
+                    "ip_version": 4 if ip_profile['ip_version']=="IPv4" else 6,
+                    "cidr": ip_profile['subnet_address']
+                    }
+            # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
+            if ip_profile.get('gateway_address'):
+                subnet['gateway_ip'] = ip_profile['gateway_address']
+            else:
+                subnet['gateway_ip'] = None
+            if ip_profile.get('dns_address'):
+                # NOTE(review): splits on ';' although the docstring above says
+                # comma separated — confirm the expected ip_profile format
+                subnet['dns_nameservers'] = ip_profile['dns_address'].split(";")
+            if 'dhcp_enabled' in ip_profile:
+                # accept both boolean False and the string "false"
+                subnet['enable_dhcp'] = False if \
+                    ip_profile['dhcp_enabled']=="false" or ip_profile['dhcp_enabled']==False else True
+            if ip_profile.get('dhcp_start_address'):
+                subnet['allocation_pools'] = []
+                subnet['allocation_pools'].append(dict())
+                subnet['allocation_pools'][0]['start'] = ip_profile['dhcp_start_address']
+            if ip_profile.get('dhcp_count'):
+                # pool end = start address + (count - 1), computed via netaddr
+                #parts = ip_profile['dhcp_start_address'].split('.')
+                #ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
+                ip_int = int(netaddr.IPAddress(ip_profile['dhcp_start_address']))
+                ip_int += ip_profile['dhcp_count'] - 1
+                ip_str = str(netaddr.IPAddress(ip_int))
+                subnet['allocation_pools'][0]['end'] = ip_str
+            #self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
+            self.neutron.create_subnet({"subnet": subnet} )
+
+            if net_type == "data" and self.config.get('multisegment_support'):
+                if self.config.get('l2gw_support'):
+                    # connect the new network to every known l2 gateway, recording
+                    # each connection in created_items for later cleanup
+                    l2gw_list = self.neutron.list_l2_gateways().get("l2_gateways", ())
+                    for l2gw in l2gw_list:
+                        l2gw_conn = {}
+                        l2gw_conn["l2_gateway_id"] = l2gw["id"]
+                        l2gw_conn["network_id"] = new_net["network"]["id"]
+                        # NOTE(review): vlanID is only bound above when
+                        # 'multisegment_vlan_range' is configured; otherwise this
+                        # raises NameError — confirm the config invariant
+                        l2gw_conn["segmentation_id"] = str(vlanID)
+                        new_l2gw_conn = self.neutron.create_l2_gateway_connection({"l2_gateway_connection": l2gw_conn})
+                        created_items["l2gwconn:" + str(new_l2gw_conn["l2_gateway_connection"]["id"])] = True
+            return new_net["network"]["id"], created_items
+        except Exception as e:
+            #delete l2gw connections (if any) before deleting the network
+            for k, v in created_items.items():
+                if not v:  # skip already deleted
+                    continue
+                try:
+                    k_item, _, k_id = k.partition(":")
+                    if k_item == "l2gwconn":
+                        self.neutron.delete_l2_gateway_connection(k_id)
+                except Exception as e2:
+                    self.logger.error("Error deleting l2 gateway connection: {}: {}".format(type(e2).__name__, e2))
+            if new_net:
+                self.neutron.delete_network(new_net['network']['id'])
+            self._format_exception(e)
+
+    def get_network_list(self, filter_dict={}):
+        '''Obtain tenant networks of VIM
+        Filter_dict can be:
+            name: network name
+            id: network uuid
+            shared: boolean
+            tenant_id: tenant
+            admin_state_up: boolean
+            status: 'ACTIVE'
+        Returns the network list of dictionaries
+        '''
+        self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
+        try:
+            self._reload_connection()
+            # work on a copy so the caller's filter_dict is not mutated
+            filter_dict_os = filter_dict.copy()
+            if self.api_version3 and "tenant_id" in filter_dict_os:
+                filter_dict_os['project_id'] = filter_dict_os.pop('tenant_id')  # TODO: check
+            net_dict = self.neutron.list_networks(**filter_dict_os)
+            net_list = net_dict["networks"]
+            # annotate each network with its mano 'type' in place
+            self.__net_os2mano(net_list)
+            return net_list
+        except (neExceptions.ConnectionFailed, ksExceptions.ClientException, neExceptions.NeutronException, ConnectionError) as e:
+            self._format_exception(e)
+
+    def get_network(self, net_id):
+        '''Obtain details of network from VIM
+        Returns the network information from a network id.
+        Raises vimconnNotFoundException when the id does not exist and
+        vimconnConflictException when more than one network matches'''
+        self.logger.debug(" Getting tenant network %s from VIM", net_id)
+        filter_dict={"id": net_id}
+        net_list = self.get_network_list(filter_dict)
+        if len(net_list)==0:
+            raise vimconn.vimconnNotFoundException("Network '{}' not found".format(net_id))
+        elif len(net_list)>1:
+            raise vimconn.vimconnConflictException("Found more than one network with this criteria")
+        net = net_list[0]
+        subnets=[]
+        # expand each subnet id into its full description; a failing subnet
+        # lookup is recorded as a 'fault' entry instead of aborting the call
+        for subnet_id in net.get("subnets", () ):
+            try:
+                subnet = self.neutron.show_subnet(subnet_id)
+            except Exception as e:
+                self.logger.error("osconnector.get_network(): Error getting subnet %s %s" % (net_id, str(e)))
+                subnet = {"id": subnet_id, "fault": str(e)}
+            subnets.append(subnet)
+        net["subnets"] = subnets
+        # expose the neutron provider attributes under the mano key names
+        net["encapsulation"] = net.get('provider:network_type')
+        net["encapsulation_type"] = net.get('provider:network_type')
+        net["segmentation_id"] = net.get('provider:segmentation_id')
+        net["encapsulation_id"] = net.get('provider:segmentation_id')
+        return net
+
+    def delete_network(self, net_id, created_items=None):
+        """
+        Removes a tenant network from VIM and its associated elements
+        :param net_id: VIM identifier of the network, provided by method new_network
+        :param created_items: dictionary with extra items to be deleted. provided by method new_network
+        Returns the network identifier or raises an exception upon error or when network is not found
+        NOTE(review): neExceptions.NeutronException appears twice in the final
+        except tuple; harmless, but one occurrence could be dropped.
+        """
+        self.logger.debug("Deleting network '%s' from VIM", net_id)
+        if created_items == None:
+            created_items = {}
+        try:
+            self._reload_connection()
+            #delete l2gw connections (if any) before deleting the network
+            for k, v in created_items.items():
+                if not v:  # skip already deleted
+                    continue
+                try:
+                    # created_items keys have the form "<kind>:<id>"
+                    k_item, _, k_id = k.partition(":")
+                    if k_item == "l2gwconn":
+                        self.neutron.delete_l2_gateway_connection(k_id)
+                except Exception as e:
+                    self.logger.error("Error deleting l2 gateway connection: {}: {}".format(type(e).__name__, e))
+            #delete VM ports attached to this networks before the network
+            ports = self.neutron.list_ports(network_id=net_id)
+            for p in ports['ports']:
+                try:
+                    self.neutron.delete_port(p["id"])
+                except Exception as e:
+                    # best effort: log and keep deleting the remaining ports
+                    self.logger.error("Error deleting port %s: %s", p["id"], str(e))
+            self.neutron.delete_network(net_id)
+            return net_id
+        except (neExceptions.ConnectionFailed, neExceptions.NetworkNotFoundClient, neExceptions.NeutronException,
+                ksExceptions.ClientException, neExceptions.NeutronException, ConnectionError) as e:
+            self._format_exception(e)
+
+    def refresh_nets_status(self, net_list):
+        '''Get the status of the networks
+           Params: the list of network identifiers
+           Returns a dictionary with:
+                net_id:         #VIM id of this network
+                    status:     #Mandatory. Text with one of:
+                                #  DELETED (not found at vim)
+                                #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+                                #  OTHER (Vim reported other status not understood)
+                                #  ERROR (VIM indicates an ERROR status)
+                                #  ACTIVE, INACTIVE, DOWN (admin down),
+                                #  BUILD (on building process)
+                                #
+                    error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
+                    vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
+
+        '''
+        net_dict={}
+        for net_id in net_list:
+            net = {}
+            try:
+                net_vim = self.get_network(net_id)
+                # translate the VIM status to mano vocabulary; unknown values
+                # become OTHER with the raw status kept in error_msg
+                if net_vim['status'] in netStatus2manoFormat:
+                    net["status"] = netStatus2manoFormat[ net_vim['status'] ]
+                else:
+                    net["status"] = "OTHER"
+                    net["error_msg"] = "VIM status reported " + net_vim['status']
+
+                # an administratively disabled network is reported as DOWN
+                if net['status'] == "ACTIVE" and not net_vim['admin_state_up']:
+                    net['status'] = 'DOWN'
+
+                net['vim_info'] = self.serialize(net_vim)
+
+                if net_vim.get('fault'):  #TODO
+                    net['error_msg'] = str(net_vim['fault'])
+            except vimconn.vimconnNotFoundException as e:
+                self.logger.error("Exception getting net status: %s", str(e))
+                net['status'] = "DELETED"
+                net['error_msg'] = str(e)
+            except vimconn.vimconnException as e:
+                self.logger.error("Exception getting net status: %s", str(e))
+                net['status'] = "VIM_ERROR"
+                net['error_msg'] = str(e)
+            net_dict[net_id] = net
+        return net_dict
+
+    def get_flavor(self, flavor_id):
+        '''Obtain flavor details from the VIM. Returns the flavor dict details'''
+        self.logger.debug("Getting flavor '%s'", flavor_id)
+        try:
+            self._reload_connection()
+            flavor = self.nova.flavors.find(id=flavor_id)
+            #TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
+            return flavor.to_dict()
+        except (nvExceptions.NotFound, nvExceptions.ClientException, ksExceptions.ClientException, ConnectionError) as e:
+            self._format_exception(e)
+
+    def get_flavor_id_from_data(self, flavor_dict):
+        """Obtain flavor id that match the flavor description
+           Returns the flavor_id or raises a vimconnNotFoundException
+           flavor_dict: contains the required ram, vcpus, disk
+           If 'use_existing_flavors' is set to True at config, the closer flavor that provides same or more ram, vcpus
+                and disk is returned. Otherwise a flavor with exactly same ram, vcpus and disk is returned or a
+                vimconnNotFoundException is raised
+        """
+        exact_match = False if self.config.get('use_existing_flavors') else True
+        try:
+            self._reload_connection()
+            # candidate starts at an 'infinite' flavor so any real one beats it
+            flavor_candidate_id = None
+            flavor_candidate_data = (10000, 10000, 10000)
+            flavor_target = (flavor_dict["ram"], flavor_dict["vcpus"], flavor_dict["disk"])
+            # numa=None
+            extended = flavor_dict.get("extended", {})
+            if extended:
+                #TODO
+                raise vimconn.vimconnNotFoundException("Flavor with EPA still not implemented")
+                # if len(numas) > 1:
+                #     raise vimconn.vimconnNotFoundException("Cannot find any flavor with more than one numa")
+                # numa=numas[0]
+                # numas = extended.get("numas")
+            for flavor in self.nova.flavors.list():
+                # flavors carrying extra specs (EPA) are skipped entirely
+                epa = flavor.get_keys()
+                if epa:
+                    continue
+                    # TODO
+                flavor_data = (flavor.ram, flavor.vcpus, flavor.disk)
+                if flavor_data == flavor_target:
+                    return flavor.id
+                # NOTE(review): tuple comparison is lexicographic, so a chosen
+                # candidate is not guaranteed to dominate the target on every
+                # component (ram, vcpus, disk) — confirm intended behaviour
+                elif not exact_match and flavor_target < flavor_data < flavor_candidate_data:
+                    flavor_candidate_id = flavor.id
+                    flavor_candidate_data = flavor_data
+            if not exact_match and flavor_candidate_id:
+                return flavor_candidate_id
+            raise vimconn.vimconnNotFoundException("Cannot find any flavor matching '{}'".format(str(flavor_dict)))
+        except (nvExceptions.NotFound, nvExceptions.ClientException, ksExceptions.ClientException, ConnectionError) as e:
+            self._format_exception(e)
+
+    def process_resource_quota(self, quota, prefix, extra_specs):
+        """Translate a resource quota description into flavor extra-specs entries.
+        :param quota: dict that may contain the keys 'limit', 'reserve' and/or 'shares'
+        :param prefix: resource name used to build the extra-spec key, e.g. 'cpu', 'memory', 'vif'
+        :param extra_specs: dict updated in place with 'quota:<prefix>_...' keys
+        :return: None (extra_specs is modified in place)
+        """
+        if 'limit' in quota:
+            extra_specs["quota:" + prefix + "_limit"] = quota['limit']
+        if 'reserve' in quota:
+            extra_specs["quota:" + prefix + "_reservation"] = quota['reserve']
+        if 'shares' in quota:
+            # shares require the level to be set to "custom" for the share value to apply
+            extra_specs["quota:" + prefix + "_shares_level"] = "custom"
+            extra_specs["quota:" + prefix + "_shares_share"] = quota['shares']
+
+    def new_flavor(self, flavor_data, change_name_if_used=True):
+        '''Adds a tenant flavor to openstack VIM
+        if change_name_if_used is True, it will change name in case of conflict, because it is not supported name repetition
+        flavor_data: dict with 'name' (mandatory) and optional 'ram', 'vcpus', 'disk', 'is_public'
+            and 'extended' (EPA: numas, cpu/mem/vif/disk-io quotas)
+        Returns the flavor identifier
+        '''
+        self.logger.debug("Adding flavor '%s'", str(flavor_data))
+        retry=0
+        max_retries=3
+        name_suffix = 0
+        try:
+            name=flavor_data['name']
+            while retry<max_retries:
+                retry+=1
+                try:
+                    self._reload_connection()
+                    if change_name_if_used:
+                        #get used names
+                        fl_names=[]
+                        fl=self.nova.flavors.list()
+                        for f in fl:
+                            fl_names.append(f.name)
+                        # append "-<n>" suffixes until the name does not collide
+                        while name in fl_names:
+                            name_suffix += 1
+                            name = flavor_data['name']+"-" + str(name_suffix)
+
+                    ram = flavor_data.get('ram',64)
+                    vcpus = flavor_data.get('vcpus',1)
+                    extra_specs={}
+
+                    extended = flavor_data.get("extended")
+                    if extended:
+                        numas=extended.get("numas")
+                        if numas:
+                            numa_nodes = len(numas)
+                            if numa_nodes > 1:
+                                # NOTE(review): inconsistent error style; every other failure path
+                                # raises via _format_exception, while this returns a (code, message)
+                                # tuple to callers expecting a flavor id — confirm intended contract
+                                return -1, "Can not add flavor with more than one numa"
+                            extra_specs["hw:numa_nodes"] = str(numa_nodes)
+                            extra_specs["hw:mem_page_size"] = "large"
+                            extra_specs["hw:cpu_policy"] = "dedicated"
+                            extra_specs["hw:numa_mempolicy"] = "strict"
+                            if self.vim_type == "VIO":
+                                # VIO (VMware) specific numa affinity / latency settings
+                                extra_specs["vmware:extra_config"] = '{"numa.nodeAffinity":"0"}'
+                                extra_specs["vmware:latency_sensitivity_level"] = "high"
+                            for numa in numas:
+                                #overwrite ram and vcpus
+                                #check if key 'memory' is present in numa else use ram value at flavor
+                                if 'memory' in numa:
+                                    ram = numa['memory']*1024
+                                #See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
+                                extra_specs["hw:cpu_sockets"] = 1
+                                if 'paired-threads' in numa:
+                                    # each paired-thread maps to two vcpus (SMT siblings)
+                                    vcpus = numa['paired-threads']*2
+                                    #cpu_thread_policy "require" implies that the compute node must have an STM architecture
+                                    extra_specs["hw:cpu_thread_policy"] = "require"
+                                    extra_specs["hw:cpu_policy"] = "dedicated"
+                                elif 'cores' in numa:
+                                    vcpus = numa['cores']
+                                    # cpu_thread_policy "prefer" implies that the host must not have an SMT architecture, or a non-SMT architecture will be emulated
+                                    extra_specs["hw:cpu_thread_policy"] = "isolate"
+                                    extra_specs["hw:cpu_policy"] = "dedicated"
+                                elif 'threads' in numa:
+                                    vcpus = numa['threads']
+                                    # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
+                                    extra_specs["hw:cpu_thread_policy"] = "prefer"
+                                    extra_specs["hw:cpu_policy"] = "dedicated"
+                                # for interface in numa.get("interfaces",() ):
+                                #     if interface["dedicated"]=="yes":
+                                #         raise vimconn.vimconnException("Passthrough interfaces are not supported for the openstack connector", http_code=vimconn.HTTP_Service_Unavailable)
+                                #     #TODO, add the key 'pci_passthrough:alias"="<label at config>:<number ifaces>"' when a way to connect it is available
+                        elif extended.get("cpu-quota"):
+                            self.process_resource_quota(extended.get("cpu-quota"), "cpu", extra_specs)
+                        if extended.get("mem-quota"):
+                            self.process_resource_quota(extended.get("mem-quota"), "memory", extra_specs)
+                        if extended.get("vif-quota"):
+                            self.process_resource_quota(extended.get("vif-quota"), "vif", extra_specs)
+                        if extended.get("disk-io-quota"):
+                            self.process_resource_quota(extended.get("disk-io-quota"), "disk_io", extra_specs)
+                    #create flavor
+                    new_flavor=self.nova.flavors.create(name,
+                                    ram,
+                                    vcpus,
+                                    flavor_data.get('disk',0),
+                                    is_public=flavor_data.get('is_public', True)
+                                )
+                    #add metadata
+                    if extra_specs:
+                        new_flavor.set_keys(extra_specs)
+                    return new_flavor.id
+                except nvExceptions.Conflict as e:
+                    # name already used: retry with a new suffixed name (computed at loop top)
+                    if change_name_if_used and retry < max_retries:
+                        continue
+                    self._format_exception(e)
+        #except nvExceptions.BadRequest as e:
+        except (ksExceptions.ClientException, nvExceptions.ClientException, ConnectionError, KeyError) as e:
+            self._format_exception(e)
+
+    def delete_flavor(self,flavor_id):
+        '''Deletes a tenant flavor from openstack VIM. Returns the old flavor_id
+        :param flavor_id: VIM identifier of the flavor to delete
+        :raises: a vimconn exception (through _format_exception) on VIM errors, e.g. not found
+        '''
+        try:
+            self._reload_connection()
+            self.nova.flavors.delete(flavor_id)
+            return flavor_id
+        #except nvExceptions.BadRequest as e:
+        except (nvExceptions.NotFound, ksExceptions.ClientException, nvExceptions.ClientException, ConnectionError) as e:
+            # translate the VIM exception into the connector's exception hierarchy and re-raise
+            self._format_exception(e)
+
+    def new_image(self,image_dict):
+        '''
+        Adds a tenant image to VIM. imge_dict is a dictionary with:
+            name: name
+            disk_format: qcow2, vhd, vmdk, raw (by default), ...
+            location: path or URI
+            public: "yes" or "no"
+            metadata: metadata of the image
+        Returns the image_id
+        '''
+        retry=0
+        max_retries=3
+        while retry<max_retries:
+            retry+=1
+            try:
+                self._reload_connection()
+                #determine format  http://docs.openstack.org/developer/glance/formats.html
+                if "disk_format" in image_dict:
+                    disk_format=image_dict["disk_format"]
+                else: #autodiscover based on extension
+                    if image_dict['location'].endswith(".qcow2"):
+                        disk_format="qcow2"
+                    elif image_dict['location'].endswith(".vhd"):
+                        disk_format="vhd"
+                    elif image_dict['location'].endswith(".vmdk"):
+                        disk_format="vmdk"
+                    elif image_dict['location'].endswith(".vdi"):
+                        disk_format="vdi"
+                    elif image_dict['location'].endswith(".iso"):
+                        disk_format="iso"
+                    elif image_dict['location'].endswith(".aki"):
+                        disk_format="aki"
+                    elif image_dict['location'].endswith(".ari"):
+                        disk_format="ari"
+                    elif image_dict['location'].endswith(".ami"):
+                        disk_format="ami"
+                    else:
+                        disk_format="raw"
+                self.logger.debug("new_image: '%s' loading from '%s'", image_dict['name'], image_dict['location'])
+                if self.vim_type == "VIO":
+                    container_format = "bare"
+                    if 'container_format' in image_dict:
+                        container_format = image_dict['container_format']
+                    new_image = self.glance.images.create(name=image_dict['name'], container_format=container_format,
+                                                          disk_format=disk_format)
+                else:
+                    new_image = self.glance.images.create(name=image_dict['name'])
+                if image_dict['location'].startswith("http"):
+                    # TODO there is not a method to direct download. It must be downloaded locally with requests
+                    raise vimconn.vimconnNotImplemented("Cannot create image from URL")
+                else: #local path
+                    with open(image_dict['location']) as fimage:
+                        self.glance.images.upload(new_image.id, fimage)
+                        #new_image = self.glancev1.images.create(name=image_dict['name'], is_public=image_dict.get('public',"yes")=="yes",
+                        #    container_format="bare", data=fimage, disk_format=disk_format)
+                metadata_to_load = image_dict.get('metadata')
+                # TODO location is a reserved word for current openstack versions. fixed for VIO please check for openstack
+                if self.vim_type == "VIO":
+                    metadata_to_load['upload_location'] = image_dict['location']
+                else:
+                    metadata_to_load['location'] = image_dict['location']
+                self.glance.images.update(new_image.id, **metadata_to_load)
+                return new_image.id
+            except (nvExceptions.Conflict, ksExceptions.ClientException, nvExceptions.ClientException) as e:
+                self._format_exception(e)
+            except (HTTPException, gl1Exceptions.HTTPException, gl1Exceptions.CommunicationError, ConnectionError) as e:
+                if retry==max_retries:
+                    continue
+                self._format_exception(e)
+            except IOError as e:  #can not open the file
+                raise vimconn.vimconnConnectionException(type(e).__name__ + ": " + str(e)+ " for " + image_dict['location'],
+                                                         http_code=vimconn.HTTP_Bad_Request)
+
+    def delete_image(self, image_id):
+        '''Deletes a tenant image from openstack VIM. Returns the old id
+        :param image_id: VIM identifier of the image to delete
+        :raises: a vimconn exception (through _format_exception) on VIM errors, e.g. not found
+        '''
+        try:
+            self._reload_connection()
+            self.glance.images.delete(image_id)
+            return image_id
+        except (nvExceptions.NotFound, ksExceptions.ClientException, nvExceptions.ClientException, gl1Exceptions.CommunicationError, gl1Exceptions.HTTPNotFound, ConnectionError) as e: #TODO remove
+            self._format_exception(e)
+
+    def get_image_id_from_path(self, path):
+        '''Get the image id from image path in the VIM database. Returns the image_id
+        :param path: value to match against the image 'location' metadata entry
+        :raises vimconn.vimconnNotFoundException: if no image has that location
+        '''
+        try:
+            self._reload_connection()
+            # linear scan of all glance images comparing the 'location' metadata entry
+            images = self.glance.images.list()
+            for image in images:
+                if image.metadata.get("location")==path:
+                    return image.id
+            raise vimconn.vimconnNotFoundException("image with location '{}' not found".format( path))
+        except (ksExceptions.ClientException, nvExceptions.ClientException, gl1Exceptions.CommunicationError, ConnectionError) as e:
+            self._format_exception(e)
+
+    def get_image_list(self, filter_dict={}):
+        '''Obtain tenant images from VIM
+        Filter_dict can be:
+            id: image id
+            name: image name
+            checksum: image checksum
+        Returns the image list of dictionaries:
+            [{<the fields at Filter_dict plus some VIM specific>}, ...]
+            List can be empty
+        '''
+        self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))
+        try:
+            self._reload_connection()
+            filter_dict_os = filter_dict.copy()
+            #First we filter by the available filter fields: name, id. The others are removed.
+            image_list = self.glance.images.list()
+            filtered_list = []
+            for image in image_list:
+                try:
+                    if filter_dict.get("name") and image["name"] != filter_dict["name"]:
+                        continue
+                    if filter_dict.get("id") and image["id"] != filter_dict["id"]:
+                        continue
+                    if filter_dict.get("checksum") and image["checksum"] != filter_dict["checksum"]:
+                        continue
+
+                    filtered_list.append(image.copy())
+                except gl1Exceptions.HTTPNotFound:
+                    pass
+            return filtered_list
+        except (ksExceptions.ClientException, nvExceptions.ClientException, gl1Exceptions.CommunicationError, ConnectionError) as e:
+            self._format_exception(e)
+
+    def __wait_for_vm(self, vm_id, status):
+        """wait until vm is in the desired status and return True.
+        If the VM gets in ERROR status, return false.
+        If the timeout is reached generate an exception
+        :param vm_id: VIM identifier of the server to poll
+        :param status: target nova status string, e.g. 'ACTIVE'
+        """
+        elapsed_time = 0
+        # poll nova every 5 seconds up to the module-level server_timeout
+        while elapsed_time < server_timeout:
+            vm_status = self.nova.servers.get(vm_id).status
+            if vm_status == status:
+                return True
+            if vm_status == 'ERROR':
+                return False
+            time.sleep(5)
+            elapsed_time += 5
+
+        # if we exceeded the timeout rollback
+        if elapsed_time >= server_timeout:
+            raise vimconn.vimconnException('Timeout waiting for instance ' + vm_id + ' to get ' + status,
+                                           http_code=vimconn.HTTP_Request_Timeout)
+
+    def _get_openstack_availablity_zones(self):
+        """
+        Get from openstack availability zones available
+        :return:
+        """
+        try:
+            openstack_availability_zone = self.nova.availability_zones.list()
+            openstack_availability_zone = [str(zone.zoneName) for zone in openstack_availability_zone
+                                           if zone.zoneName != 'internal']
+            return openstack_availability_zone
+        except Exception as e:
+            return None
+
+    def _set_availablity_zones(self):
+        """
+        Set vim availablity zone
+        Priority: explicit 'availability_zone' at VIM config (string or list); otherwise the
+        zones reported by openstack (may be None if they cannot be retrieved).
+        :return: None; sets self.availability_zone
+        """
+
+        if 'availability_zone' in self.config:
+            vim_availability_zones = self.config.get('availability_zone')
+            if isinstance(vim_availability_zones, str):
+                # a single zone given as a plain string: normalize to a one-element list
+                self.availability_zone = [vim_availability_zones]
+            elif isinstance(vim_availability_zones, list):
+                self.availability_zone = vim_availability_zones
+        else:
+            self.availability_zone = self._get_openstack_availablity_zones()
+
+    def _get_vm_availability_zone(self, availability_zone_index, availability_zone_list):
+        """
+        Return the availability zone to be used by the created VM.
+        :param availability_zone_index: index into availability_zone_list chosen for this VM,
+            or None when no zone was requested in the descriptor
+        :param availability_zone_list: zone names given by the user at the VNFD descriptor
+        :return: The VIM availability zone to be used or None
+        :raises vimconn.vimconnConflictException: if the VIM offers fewer zones than requested
+        """
+        if availability_zone_index is None:
+            # no explicit request: fall back to the configured default zone (first one if a list)
+            if not self.config.get('availability_zone'):
+                return None
+            elif isinstance(self.config.get('availability_zone'), str):
+                return self.config['availability_zone']
+            else:
+                # TODO consider using a different parameter at config for default AV and AV list match
+                return self.config['availability_zone'][0]
+
+        vim_availability_zones = self.availability_zone
+        # check if VIM offer enough availability zones describe in the VNFD
+        if vim_availability_zones and len(availability_zone_list) <= len(vim_availability_zones):
+            # check if all the names of NFV AV match VIM AV names
+            match_by_index = False
+            for av in availability_zone_list:
+                if av not in vim_availability_zones:
+                    # at least one descriptor zone name is unknown to the VIM: select by position instead
+                    match_by_index = True
+                    break
+            if match_by_index:
+                return vim_availability_zones[availability_zone_index]
+            else:
+                return availability_zone_list[availability_zone_index]
+        else:
+            raise vimconn.vimconnConflictException("No enough availability zones at VIM for this deployment")
+
+    def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None, disk_list=None,
+                       availability_zone_index=None, availability_zone_list=None):
+        """Adds a VM instance to VIM
+        Params:
+            start: indicates if VM must start or boot in pause mode. Ignored
+            image_id,flavor_id: iamge and flavor uuid
+            net_list: list of interfaces, each one is a dictionary with:
+                name:
+                net_id: network uuid to connect
+                vpci: virtual vcpi to assign, ignored because openstack lack #TODO
+                model: interface model, ignored #TODO
+                mac_address: used for  SR-IOV ifaces #TODO for other types
+                use: 'data', 'bridge',  'mgmt'
+                type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
+                vim_id: filled/added by this function
+                floating_ip: True/False (or it can be None)
+            'cloud_config': (optional) dictionary with:
+            'key-pairs': (optional) list of strings with the public key to be inserted to the default user
+            'users': (optional) list of users to be inserted, each item is a dict with:
+                'name': (mandatory) user name,
+                'key-pairs': (optional) list of strings with the public key to be inserted to the user
+            'user-data': (optional) string is a text script to be passed directly to cloud-init
+            'config-files': (optional). List of files to be transferred. Each item is a dict with:
+                'dest': (mandatory) string with the destination absolute path
+                'encoding': (optional, by default text). Can be one of:
+                    'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
+                'content' (mandatory): string with the content of the file
+                'permissions': (optional) string with file permissions, typically octal notation '0644'
+                'owner': (optional) file owner, string with the format 'owner:group'
+            'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
+            'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
+                'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
+                'size': (mandatory) string with the size of the disk in GB
+                'vim_id' (optional) should use this existing volume id
+            availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
+            availability_zone_list: list of availability zones given by user in the VNFD descriptor.  Ignore if
+                availability_zone_index is None
+                #TODO ip, security groups
+        Returns a tuple with the instance identifier and created_items or raises an exception on error
+            created_items can be None or a dictionary where this method can include key-values that will be passed to
+            the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
+            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+            as not present.
+        """
+        self.logger.debug("new_vminstance input: image='%s' flavor='%s' nics='%s'",image_id, flavor_id,str(net_list))
+        try:
+            server = None
+            created_items = {}
+            # metadata = {}
+            net_list_vim = []
+            external_network = []   # list of external networks to be connected to instance, later on used to create floating_ip
+            no_secured_ports = []   # List of port-is with port-security disabled
+            self._reload_connection()
+            # metadata_vpci = {}   # For a specific neutron plugin
+            block_device_mapping = None
+
+            for net in net_list:
+                if not net.get("net_id"):   # skip non connected iface
+                    continue
+
+                port_dict = {
+                    "network_id": net["net_id"],
+                    "name": net.get("name"),
+                    "admin_state_up": True
+                }
+                if self.config.get("security_groups") and net.get("port_security") is not False and \
+                        not self.config.get("no_port_security_extension"):
+                    if not self.security_groups_id:
+                        self._get_ids_from_name()
+                    port_dict["security_groups"] = self.security_groups_id
+
+                if net["type"]=="virtual":
+                    pass
+                    # if "vpci" in net:
+                    #     metadata_vpci[ net["net_id"] ] = [[ net["vpci"], "" ]]
+                elif net["type"] == "VF" or net["type"] == "SR-IOV":  # for VF
+                    # if "vpci" in net:
+                    #     if "VF" not in metadata_vpci:
+                    #         metadata_vpci["VF"]=[]
+                    #     metadata_vpci["VF"].append([ net["vpci"], "" ])
+                    port_dict["binding:vnic_type"]="direct"
+                    # VIO specific Changes
+                    if self.vim_type == "VIO":
+                        # Need to create port with port_security_enabled = False and no-security-groups
+                        port_dict["port_security_enabled"]=False
+                        port_dict["provider_security_groups"]=[]
+                        port_dict["security_groups"]=[]
+                else:   # For PT PCI-PASSTHROUGH
+                    # VIO specific Changes
+                    # Current VIO release does not support port with type 'direct-physical'
+                    # So no need to create virtual port in case of PCI-device.
+                    # Will update port_dict code when support gets added in next VIO release
+                    if self.vim_type == "VIO":
+                        raise vimconn.vimconnNotSupportedException(
+                            "Current VIO release does not support full passthrough (PT)")
+                    # if "vpci" in net:
+                    #     if "PF" not in metadata_vpci:
+                    #         metadata_vpci["PF"]=[]
+                    #     metadata_vpci["PF"].append([ net["vpci"], "" ])
+                    port_dict["binding:vnic_type"]="direct-physical"
+                if not port_dict["name"]:
+                    port_dict["name"]=name
+                if net.get("mac_address"):
+                    port_dict["mac_address"]=net["mac_address"]
+                if net.get("ip_address"):
+                    port_dict["fixed_ips"] = [{'ip_address': net["ip_address"]}]
+                    # TODO add 'subnet_id': <subnet_id>
+                new_port = self.neutron.create_port({"port": port_dict })
+                created_items["port:" + str(new_port["port"]["id"])] = True
+                net["mac_adress"] = new_port["port"]["mac_address"]
+                net["vim_id"] = new_port["port"]["id"]
+                # if try to use a network without subnetwork, it will return a emtpy list
+                fixed_ips = new_port["port"].get("fixed_ips")
+                if fixed_ips:
+                    net["ip"] = fixed_ips[0].get("ip_address")
+                else:
+                    net["ip"] = None
+
+                port = {"port-id": new_port["port"]["id"]}
+                if float(self.nova.api_version.get_string()) >= 2.32:
+                    port["tag"] = new_port["port"]["name"]
+                net_list_vim.append(port)
+
+                if net.get('floating_ip', False):
+                    net['exit_on_floating_ip_error'] = True
+                    external_network.append(net)
+                elif net['use'] == 'mgmt' and self.config.get('use_floating_ip'):
+                    net['exit_on_floating_ip_error'] = False
+                    external_network.append(net)
+                    net['floating_ip'] = self.config.get('use_floating_ip')
+
+                # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic is dropped.
+                # As a workaround we wait until the VM is active and then disable the port-security
+                if net.get("port_security") == False and not self.config.get("no_port_security_extension"):
+                    no_secured_ports.append(new_port["port"]["id"])
+
+            # if metadata_vpci:
+            #     metadata = {"pci_assignement": json.dumps(metadata_vpci)}
+            #     if len(metadata["pci_assignement"]) >255:
+            #         #limit the metadata size
+            #         #metadata["pci_assignement"] = metadata["pci_assignement"][0:255]
+            #         self.logger.warn("Metadata deleted since it exceeds the expected length (255) ")
+            #         metadata = {}
+
+            self.logger.debug("name '%s' image_id '%s'flavor_id '%s' net_list_vim '%s' description '%s'",
+                              name, image_id, flavor_id, str(net_list_vim), description)
+
+            # cloud config
+            config_drive, userdata = self._create_user_data(cloud_config)
+
+            # Create additional volumes in case these are present in disk_list
+            base_disk_index = ord('b')
+            if disk_list:
+                block_device_mapping = {}
+                for disk in disk_list:
+                    if disk.get('vim_id'):
+                        block_device_mapping['_vd' + chr(base_disk_index)] = disk['vim_id']
+                    else:
+                        if 'image_id' in disk:
+                            volume = self.cinder.volumes.create(size=disk['size'], name=name + '_vd' +
+                                                                chr(base_disk_index), imageRef=disk['image_id'])
+                        else:
+                            volume = self.cinder.volumes.create(size=disk['size'], name=name + '_vd' +
+                                                                chr(base_disk_index))
+                        created_items["volume:" + str(volume.id)] = True
+                        block_device_mapping['_vd' + chr(base_disk_index)] = volume.id
+                    base_disk_index += 1
+
+                # Wait until created volumes are with status available
+                elapsed_time = 0
+                while elapsed_time < volume_timeout:
+                    for created_item in created_items:
+                        v, _, volume_id = created_item.partition(":")
+                        if v == 'volume':
+                            if self.cinder.volumes.get(volume_id).status != 'available':
+                                break
+                    else:  # all ready: break from while
+                        break
+                    time.sleep(5)
+                    elapsed_time += 5
+                # If we exceeded the timeout rollback
+                if elapsed_time >= volume_timeout:
+                    raise vimconn.vimconnException('Timeout creating volumes for instance ' + name,
+                                                   http_code=vimconn.HTTP_Request_Timeout)
+            # get availability Zone
+            vm_av_zone = self._get_vm_availability_zone(availability_zone_index, availability_zone_list)
+
+            self.logger.debug("nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
+                              "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
+                              "block_device_mapping={})".format(name, image_id, flavor_id, net_list_vim,
+                                                                self.config.get("security_groups"), vm_av_zone,
+                                                                self.config.get('keypair'), userdata, config_drive,
+                                                                block_device_mapping))
+            server = self.nova.servers.create(name, image_id, flavor_id, nics=net_list_vim,
+                                              security_groups=self.config.get("security_groups"),
+                                              # TODO remove security_groups in future versions. Already at neutron port
+                                              availability_zone=vm_av_zone,
+                                              key_name=self.config.get('keypair'),
+                                              userdata=userdata,
+                                              config_drive=config_drive,
+                                              block_device_mapping=block_device_mapping
+                                              )  # , description=description)
+
+            vm_start_time = time.time()
+            # Previously mentioned workaround to wait until the VM is active and then disable the port-security
+            if no_secured_ports:
+                self.__wait_for_vm(server.id, 'ACTIVE')
+
+            for port_id in no_secured_ports:
+                try:
+                    self.neutron.update_port(port_id,
+                                             {"port": {"port_security_enabled": False, "security_groups": None}})
+                except Exception as e:
+                    raise vimconn.vimconnException("It was not possible to disable port security for port {}".format(
+                        port_id))
+            # print "DONE :-)", server
+
+            # pool_id = None
+            if external_network:
+                floating_ips = self.neutron.list_floatingips().get("floatingips", ())
+            for floating_network in external_network:
+                try:
+                    assigned = False
+                    while not assigned:
+                        if floating_ips:
+                            ip = floating_ips.pop(0)
+                            if ip.get("port_id", False) or ip.get('tenant_id') != server.tenant_id:
+                                continue
+                            if isinstance(floating_network['floating_ip'], str):
+                                if ip.get("floating_network_id") != floating_network['floating_ip']:
+                                    continue
+                            free_floating_ip = ip["id"]
+                        else:
+                            if isinstance(floating_network['floating_ip'], str) and \
+                                floating_network['floating_ip'].lower() != "true":
+                                pool_id = floating_network['floating_ip']
+                            else:
+                                # Find the external network
+                                external_nets = list()
+                                for net in self.neutron.list_networks()['networks']:
+                                    if net['router:external']:
+                                            external_nets.append(net)
+
+                                if len(external_nets) == 0:
+                                    raise vimconn.vimconnException("Cannot create floating_ip automatically since no external "
+                                                                   "network is present",
+                                                                    http_code=vimconn.HTTP_Conflict)
+                                if len(external_nets) > 1:
+                                    raise vimconn.vimconnException("Cannot create floating_ip automatically since multiple "
+                                                                   "external networks are present",
+                                                                   http_code=vimconn.HTTP_Conflict)
+
+                                pool_id = external_nets[0].get('id')
+                            param = {'floatingip': {'floating_network_id': pool_id, 'tenant_id': server.tenant_id}}
+                            try:
+                                # self.logger.debug("Creating floating IP")
+                                new_floating_ip = self.neutron.create_floatingip(param)
+                                free_floating_ip = new_floating_ip['floatingip']['id']
+                            except Exception as e:
+                                raise vimconn.vimconnException(type(e).__name__ + ": Cannot create new floating_ip " +
+                                                               str(e), http_code=vimconn.HTTP_Conflict)
+
+                        while not assigned:
+                            try:
+                                # the vim_id key contains the neutron.port_id
+                                self.neutron.update_floatingip(free_floating_ip,
+                                                               {"floatingip": {"port_id": floating_network["vim_id"]}})
+                                # Using nove is deprecated on nova client 10.0
+                                assigned = True
+                            except Exception as e:
+                                # openstack need some time after VM creation to asign an IP. So retry if fails
+                                vm_status = self.nova.servers.get(server.id).status
+                                if vm_status != 'ACTIVE' and vm_status != 'ERROR':
+                                    if time.time() - vm_start_time < server_timeout:
+                                        time.sleep(5)
+                                        continue
+                                raise vimconn.vimconnException(
+                                    "Cannot create floating_ip: {} {}".format(type(e).__name__, e),
+                                    http_code=vimconn.HTTP_Conflict)
+
+                except Exception as e:
+                    if not floating_network['exit_on_floating_ip_error']:
+                        self.logger.warning("Cannot create floating_ip. %s", str(e))
+                        continue
+                    raise
+
+            return server.id, created_items
+#        except nvExceptions.NotFound as e:
+#            error_value=-vimconn.HTTP_Not_Found
+#            error_text= "vm instance %s not found" % vm_id
+#        except TypeError as e:
+#            raise vimconn.vimconnException(type(e).__name__ + ": "+  str(e), http_code=vimconn.HTTP_Bad_Request)
+
+        except Exception as e:
+            server_id = None
+            if server:
+                server_id = server.id
+            try:
+                self.delete_vminstance(server_id, created_items)
+            except Exception as e2:
+                self.logger.error("new_vminstance rollback fail {}".format(e2))
+
+            self._format_exception(e)
+
+    def get_vminstance(self,vm_id):
+        '''Returns the VM instance information from VIM'''
+        #self.logger.debug("Getting VM from VIM")
+        try:
+            self._reload_connection()
+            server = self.nova.servers.find(id=vm_id)
+            #TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
+            return server.to_dict()
+        except (ksExceptions.ClientException, nvExceptions.ClientException, nvExceptions.NotFound, ConnectionError) as e:
+            self._format_exception(e)
+
    def get_vminstance_console(self,vm_id, console_type="vnc"):
        '''
        Get a console for the virtual machine
        Params:
            vm_id: uuid of the VM
            console_type, can be:
                "novnc" (by default), "xvpvnc" for VNC types,
                "rdp-html5" for RDP types, "spice-html5" for SPICE types
        Returns dict with the console parameters:
                protocol: ssh, ftp, http, https, ...
                server:   usually ip address
                port:     the http, ssh, ... port
                suffix:   extra text, e.g. the http path and query string
        NOTE(review): the signature default "vnc" is not one of the accepted
        values below, so calling without console_type falls into the else
        branch and raises vimconnException, although this docstring says
        "novnc" is the default -- confirm whether the default should be
        "novnc".
        '''
        self.logger.debug("Getting VM CONSOLE from VIM")
        try:
            self._reload_connection()
            server = self.nova.servers.find(id=vm_id)
            # dispatch the requested console type to the matching novaclient call
            if console_type == None or console_type == "novnc":
                console_dict = server.get_vnc_console("novnc")
            elif console_type == "xvpvnc":
                console_dict = server.get_vnc_console(console_type)
            elif console_type == "rdp-html5":
                console_dict = server.get_rdp_console(console_type)
            elif console_type == "spice-html5":
                console_dict = server.get_spice_console(console_type)
            else:
                raise vimconn.vimconnException("console type '{}' not allowed".format(console_type), http_code=vimconn.HTTP_Bad_Request)

            console_dict1 = console_dict.get("console")
            if console_dict1:
                console_url = console_dict1.get("url")
                if console_url:
                    # parse console_url into protocol, server, port and suffix,
                    # e.g. "http://host:6080/vnc_auto.html?token=..."
                    protocol_index = console_url.find("//")
                    suffix_index = console_url[protocol_index+2:].find("/") + protocol_index+2
                    port_index = console_url[protocol_index+2:suffix_index].find(":") + protocol_index+2
                    if protocol_index < 0 or port_index<0 or suffix_index<0:
                        return -vimconn.HTTP_Internal_Server_Error, "Unexpected response from VIM"
                    console_dict={"protocol": console_url[0:protocol_index],
                                  "server":   console_url[protocol_index+2:port_index],
                                  "port":     console_url[port_index:suffix_index],
                                  "suffix":   console_url[suffix_index+1:]
                                  }
                    protocol_index += 2  # NOTE(review): this increment has no effect; the dict is already built
                    return console_dict
            raise vimconn.vimconnUnexpectedResponse("Unexpected response from VIM")

        except (nvExceptions.NotFound, ksExceptions.ClientException, nvExceptions.ClientException, nvExceptions.BadRequest, ConnectionError) as e:
            self._format_exception(e)
+
    def delete_vminstance(self, vm_id, created_items=None):
        '''Removes a VM instance from VIM. Returns None

        Params:
            vm_id: VIM identifier of the VM
            created_items: dict of elements created by new_vminstance, with
                keys of the form "<type>:<vim-id>" ("port:...", "volume:...");
                entries with a falsy value are treated as already deleted
        '''
        #print "osconnector: Getting VM from VIM"
        if created_items == None:
            created_items = {}
        try:
            self._reload_connection()
            # delete VM ports attached to this networks before the virtual machine
            for k, v in created_items.items():
                if not v:  # skip already deleted
                    continue
                try:
                    # created_items keys have the format "<type>:<vim-id>"
                    k_item, _, k_id = k.partition(":")
                    if k_item == "port":
                        self.neutron.delete_port(k_id)
                except Exception as e:
                    # best effort: log and keep deleting the remaining items
                    self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))

            # #commented because detaching the volumes makes the servers.delete not work properly ?!?
            # #dettach volumes attached
            # server = self.nova.servers.get(vm_id)
            # volumes_attached_dict = server._info['os-extended-volumes:volumes_attached']   #volume['id']
            # #for volume in volumes_attached_dict:
            # #    self.cinder.volumes.detach(volume['id'])

            if vm_id:
                self.nova.servers.delete(vm_id)

            # delete volumes. Although having detached, they should have in active status before deleting
            # we ensure in this loop
            # poll until every created volume is gone or volume_timeout (module-level) expires
            keep_waiting = True
            elapsed_time = 0
            while keep_waiting and elapsed_time < volume_timeout:
                keep_waiting = False
                for k, v in created_items.items():
                    if not v:  # skip already deleted
                        continue
                    try:
                        k_item, _, k_id = k.partition(":")
                        if k_item == "volume":
                            # a volume can only be removed once nova has released it ('available')
                            if self.cinder.volumes.get(k_id).status != 'available':
                                keep_waiting = True
                            else:
                                self.cinder.volumes.delete(k_id)
                    except Exception as e:
                        self.logger.error("Error deleting volume: {}: {}".format(type(e).__name__, e))
                if keep_waiting:
                    time.sleep(1)
                    elapsed_time += 1
            return None
        except (nvExceptions.NotFound, ksExceptions.ClientException, nvExceptions.ClientException, ConnectionError) as e:
            self._format_exception(e)
+
    def refresh_vms_status(self, vm_list):
        '''Get the status of the virtual machines and their interfaces/ports
           Params: the list of VM identifiers
           Returns a dictionary with:
                vm_id:          #VIM id of this Virtual Machine
                    status:     #Mandatory. Text with one of:
                                #  DELETED (not found at vim)
                                #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                                #  OTHER (Vim reported other status not understood)
                                #  ERROR (VIM indicates an ERROR status)
                                #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
                                #  CREATING (on building process), ERROR
                                #  ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
                                #
                    error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
                    vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
                    interfaces:
                     -  vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
                        mac_address:      #Text format XX:XX:XX:XX:XX:XX
                        vim_net_id:       #network id where this interface is connected
                        vim_interface_id: #interface/port VIM id
                        ip_address:       #null, or text with IPv4, IPv6 address
                        compute_node:     #identification of compute node where PF,VF interface is allocated
                        pci:              #PCI address of the NIC that hosts the PF,VF
                        vlan:             #physical VLAN used for VF
        '''
        vm_dict={}
        self.logger.debug("refresh_vms status: Getting tenant VM instance information from VIM")
        for vm_id in vm_list:
            vm={}
            try:
                vm_vim = self.get_vminstance(vm_id)
                # translate the VIM status into the mano status catalogue
                if vm_vim['status'] in vmStatus2manoFormat:
                    vm['status']    =  vmStatus2manoFormat[ vm_vim['status'] ]
                else:
                    vm['status']    = "OTHER"
                    vm['error_msg'] = "VIM status reported " + vm_vim['status']

                vm['vim_info'] = self.serialize(vm_vim)

                vm["interfaces"] = []
                if vm_vim.get('fault'):
                    vm['error_msg'] = str(vm_vim['fault'])
                #get interfaces
                try:
                    self._reload_connection()
                    port_dict = self.neutron.list_ports(device_id=vm_id)
                    for port in port_dict["ports"]:
                        interface={}
                        interface['vim_info'] = self.serialize(port)
                        interface["mac_address"] = port.get("mac_address")
                        interface["vim_net_id"] = port["network_id"]
                        interface["vim_interface_id"] = port["id"]
                        # check if OS-EXT-SRV-ATTR:host is there,
                        # in case of non-admin credentials, it will be missing
                        if vm_vim.get('OS-EXT-SRV-ATTR:host'):
                            interface["compute_node"] = vm_vim['OS-EXT-SRV-ATTR:host']
                        interface["pci"] = None

                        # check if binding:profile is there,
                        # in case of non-admin credentials, it will be missing
                        if port.get('binding:profile'):
                            if port['binding:profile'].get('pci_slot'):
                                # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting the slot to 0x00
                                # TODO: This is just a workaround valid for niantinc. Find a better way to do so
                                #   CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2)   assuming there are 2 ports per nic
                                pci = port['binding:profile']['pci_slot']
                                # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
                                interface["pci"] = pci
                        interface["vlan"] = None
                        #if network is of type vlan and port is of type direct (sr-iov) then set vlan id
                        network = self.neutron.show_network(port["network_id"])
                        if network['network'].get('provider:network_type') == 'vlan' and \
                            port.get("binding:vnic_type") == "direct":
                            interface["vlan"] = network['network'].get('provider:segmentation_id')
                        ips=[]
                        #look for floating ip address
                        try:
                            floating_ip_dict = self.neutron.list_floatingips(port_id=port["id"])
                            if floating_ip_dict.get("floatingips"):
                                ips.append(floating_ip_dict["floatingips"][0].get("floating_ip_address") )
                        except Exception:
                            # floating ips are optional; ignore errors obtaining them
                            pass

                        for subnet in port["fixed_ips"]:
                            ips.append(subnet["ip_address"])
                        interface["ip_address"] = ";".join(ips)
                        vm["interfaces"].append(interface)
                except Exception as e:
                    # interface information is best effort; report the VM anyway
                    self.logger.error("Error getting vm interface information {}: {}".format(type(e).__name__, e),
                                      exc_info=True)
            except vimconn.vimconnNotFoundException as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm['status'] = "DELETED"
                vm['error_msg'] = str(e)
            except vimconn.vimconnException as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm['status'] = "VIM_ERROR"
                vm['error_msg'] = str(e)
            vm_dict[vm_id] = vm
        return vm_dict
+
+    def action_vminstance(self, vm_id, action_dict, created_items={}):
+        '''Send and action over a VM instance from VIM
+        Returns None or the console dict if the action was successfully sent to the VIM'''
+        self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
+        try:
+            self._reload_connection()
+            server = self.nova.servers.find(id=vm_id)
+            if "start" in action_dict:
+                if action_dict["start"]=="rebuild":
+                    server.rebuild()
+                else:
+                    if server.status=="PAUSED":
+                        server.unpause()
+                    elif server.status=="SUSPENDED":
+                        server.resume()
+                    elif server.status=="SHUTOFF":
+                        server.start()
+            elif "pause" in action_dict:
+                server.pause()
+            elif "resume" in action_dict:
+                server.resume()
+            elif "shutoff" in action_dict or "shutdown" in action_dict:
+                server.stop()
+            elif "forceOff" in action_dict:
+                server.stop() #TODO
+            elif "terminate" in action_dict:
+                server.delete()
+            elif "createImage" in action_dict:
+                server.create_image()
+                #"path":path_schema,
+                #"description":description_schema,
+                #"name":name_schema,
+                #"metadata":metadata_schema,
+                #"imageRef": id_schema,
+                #"disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
+            elif "rebuild" in action_dict:
+                server.rebuild(server.image['id'])
+            elif "reboot" in action_dict:
+                server.reboot() #reboot_type='SOFT'
+            elif "console" in action_dict:
+                console_type = action_dict["console"]
+                if console_type == None or console_type == "novnc":
+                    console_dict = server.get_vnc_console("novnc")
+                elif console_type == "xvpvnc":
+                    console_dict = server.get_vnc_console(console_type)
+                elif console_type == "rdp-html5":
+                    console_dict = server.get_rdp_console(console_type)
+                elif console_type == "spice-html5":
+                    console_dict = server.get_spice_console(console_type)
+                else:
+                    raise vimconn.vimconnException("console type '{}' not allowed".format(console_type),
+                                                   http_code=vimconn.HTTP_Bad_Request)
+                try:
+                    console_url = console_dict["console"]["url"]
+                    #parse console_url
+                    protocol_index = console_url.find("//")
+                    suffix_index = console_url[protocol_index+2:].find("/") + protocol_index+2
+                    port_index = console_url[protocol_index+2:suffix_index].find(":") + protocol_index+2
+                    if protocol_index < 0 or port_index<0 or suffix_index<0:
+                        raise vimconn.vimconnException("Unexpected response from VIM " + str(console_dict))
+                    console_dict2={"protocol": console_url[0:protocol_index],
+                                  "server":   console_url[protocol_index+2 : port_index],
+                                  "port":     int(console_url[port_index+1 : suffix_index]),
+                                  "suffix":   console_url[suffix_index+1:]
+                                  }
+                    return console_dict2
+                except Exception as e:
+                    raise vimconn.vimconnException("Unexpected response from VIM " + str(console_dict))
+
+            return None
+        except (ksExceptions.ClientException, nvExceptions.ClientException, nvExceptions.NotFound, ConnectionError) as e:
+            self._format_exception(e)
+        #TODO insert exception vimconn.HTTP_Unauthorized
+
+    ####### VIO Specific Changes #########
+    def _generate_vlanID(self):
+        """
+         Method to get unused vlanID
+            Args:
+                None
+            Returns:
+                vlanID
+        """
+        #Get used VLAN IDs
+        usedVlanIDs = []
+        networks = self.get_network_list()
+        for net in networks:
+            if net.get('provider:segmentation_id'):
+                usedVlanIDs.append(net.get('provider:segmentation_id'))
+        used_vlanIDs = set(usedVlanIDs)
+
+        #find unused VLAN ID
+        for vlanID_range in self.config.get('dataplane_net_vlan_range'):
+            try:
+                start_vlanid , end_vlanid = map(int, vlanID_range.replace(" ", "").split("-"))
+                for vlanID in range(start_vlanid, end_vlanid + 1):
+                    if vlanID not in used_vlanIDs:
+                        return vlanID
+            except Exception as exp:
+                raise vimconn.vimconnException("Exception {} occurred while generating VLAN ID.".format(exp))
+        else:
+            raise vimconn.vimconnConflictException("Unable to create the SRIOV VLAN network."\
+                " All given Vlan IDs {} are in use.".format(self.config.get('dataplane_net_vlan_range')))
+
+
+    def _generate_multisegment_vlanID(self):
+        """
+         Method to get unused vlanID
+            Args:
+                None
+            Returns:
+                vlanID
+        """
+        #Get used VLAN IDs
+        usedVlanIDs = []
+        networks = self.get_network_list()
+        for net in networks:
+            if net.get('provider:network_type') == "vlan" and net.get('provider:segmentation_id'):
+                usedVlanIDs.append(net.get('provider:segmentation_id'))
+            elif net.get('segments'):
+                for segment in net.get('segments'):
+                    if segment.get('provider:network_type') == "vlan" and segment.get('provider:segmentation_id'):
+                        usedVlanIDs.append(segment.get('provider:segmentation_id'))
+        used_vlanIDs = set(usedVlanIDs)
+
+        #find unused VLAN ID
+        for vlanID_range in self.config.get('multisegment_vlan_range'):
+            try:
+                start_vlanid , end_vlanid = map(int, vlanID_range.replace(" ", "").split("-"))
+                for vlanID in range(start_vlanid, end_vlanid + 1):
+                    if vlanID not in used_vlanIDs:
+                        return vlanID
+            except Exception as exp:
+                raise vimconn.vimconnException("Exception {} occurred while generating VLAN ID.".format(exp))
+        else:
+            raise vimconn.vimconnConflictException("Unable to create the VLAN segment."\
+                " All VLAN IDs {} are in use.".format(self.config.get('multisegment_vlan_range')))
+
+
+    def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
+        """
+        Method to validate user given vlanID ranges
+            Args:  None
+            Returns: None
+        """
+        for vlanID_range in input_vlan_range:
+            vlan_range = vlanID_range.replace(" ", "")
+            #validate format
+            vlanID_pattern = r'(\d)*-(\d)*$'
+            match_obj = re.match(vlanID_pattern, vlan_range)
+            if not match_obj:
+                raise vimconn.vimconnConflictException("Invalid VLAN range for {}: {}.You must provide "\
+                "'{}' in format [start_ID - end_ID].".format(text_vlan_range, vlanID_range, text_vlan_range))
+
+            start_vlanid , end_vlanid = map(int,vlan_range.split("-"))
+            if start_vlanid <= 0 :
+                raise vimconn.vimconnConflictException("Invalid VLAN range for {}: {}."\
+                "Start ID can not be zero. For VLAN "\
+                "networks valid IDs are 1 to 4094 ".format(text_vlan_range, vlanID_range))
+            if end_vlanid > 4094 :
+                raise vimconn.vimconnConflictException("Invalid VLAN range for {}: {}."\
+                "End VLAN ID can not be greater than 4094. For VLAN "\
+                "networks valid IDs are 1 to 4094 ".format(text_vlan_range, vlanID_range))
+
+            if start_vlanid > end_vlanid:
+                raise vimconn.vimconnConflictException("Invalid VLAN range for {}: {}."\
+                    "You must provide '{}' in format start_ID - end_ID and "\
+                    "start_ID < end_ID ".format(text_vlan_range, vlanID_range, text_vlan_range))
+
+#NOT USED FUNCTIONS
+
+    def new_external_port(self, port_data):
+        #TODO openstack if needed
+        '''Adds a external port to VIM'''
+        '''Returns the port identifier'''
+        return -vimconn.HTTP_Internal_Server_Error, "osconnector.new_external_port() not implemented"
+
+    def connect_port_network(self, port_id, network_id, admin=False):
+        #TODO openstack if needed
+        '''Connects a external port to a network'''
+        '''Returns status code of the VIM response'''
+        return -vimconn.HTTP_Internal_Server_Error, "osconnector.connect_port_network() not implemented"
+
    def new_user(self, user_name, user_passwd, tenant_id=None):
        '''Adds a new user to openstack VIM'''
        '''Returns the user identifier'''
        self.logger.debug("osconnector: Adding a new user to VIM")
        try:
            self._reload_connection()
            # keystone users.create: default_project sets the project the user belongs to
            user=self.keystone.users.create(user_name, password=user_passwd, default_project=tenant_id)
            #self.keystone.tenants.add_user(self.k_creds["username"], #role)
            return user.id
        except ksExceptions.ConnectionError as e:
            error_value=-vimconn.HTTP_Bad_Request
            error_text= type(e).__name__ + ": "+  (str(e) if len(e.args)==0 else str(e.args[0]))
        except ksExceptions.ClientException as e: #TODO remove
            error_value=-vimconn.HTTP_Bad_Request
            error_text= type(e).__name__ + ": "+  (str(e) if len(e.args)==0 else str(e.args[0]))
        #TODO insert exception vimconn.HTTP_Unauthorized
        #if reaching here is because an exception
        # legacy error convention: return (negative HTTP code, error text) instead of raising
        self.logger.debug("new_user " + error_text)
        return error_value, error_text
+
+    def delete_user(self, user_id):
+        '''Delete a user from openstack VIM'''
+        '''Returns the user identifier'''
+        if self.debug:
+            print("osconnector: Deleting  a  user from VIM")
+        try:
+            self._reload_connection()
+            self.keystone.users.delete(user_id)
+            return 1, user_id
+        except ksExceptions.ConnectionError as e:
+            error_value=-vimconn.HTTP_Bad_Request
+            error_text= type(e).__name__ + ": "+  (str(e) if len(e.args)==0 else str(e.args[0]))
+        except ksExceptions.NotFound as e:
+            error_value=-vimconn.HTTP_Not_Found
+            error_text= type(e).__name__ + ": "+  (str(e) if len(e.args)==0 else str(e.args[0]))
+        except ksExceptions.ClientException as e: #TODO remove
+            error_value=-vimconn.HTTP_Bad_Request
+            error_text= type(e).__name__ + ": "+  (str(e) if len(e.args)==0 else str(e.args[0]))
+        #TODO insert exception vimconn.HTTP_Unauthorized
+        #if reaching here is because an exception
+            self.logger.debug("delete_tenant " + error_text)
+        return error_value, error_text
+
+    def get_hosts_info(self):
+        '''Get the information of deployed hosts
+        Returns the hosts content'''
+        if self.debug:
+            print("osconnector: Getting Host info from VIM")
+        try:
+            h_list=[]
+            self._reload_connection()
+            hypervisors = self.nova.hypervisors.list()
+            for hype in hypervisors:
+                h_list.append( hype.to_dict() )
+            return 1, {"hosts":h_list}
+        except nvExceptions.NotFound as e:
+            error_value=-vimconn.HTTP_Not_Found
+            error_text= (str(e) if len(e.args)==0 else str(e.args[0]))
+        except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
+            error_value=-vimconn.HTTP_Bad_Request
+            error_text= type(e).__name__ + ": "+  (str(e) if len(e.args)==0 else str(e.args[0]))
+        #TODO insert exception vimconn.HTTP_Unauthorized
+        #if reaching here is because an exception
+        self.logger.debug("get_hosts_info " + error_text)
+        return error_value, error_text
+
+    def get_hosts(self, vim_tenant):
+        '''Get the hosts and deployed instances
+        Returns the hosts content'''
+        r, hype_dict = self.get_hosts_info()
+        if r<0:
+            return r, hype_dict
+        hypervisors = hype_dict["hosts"]
+        try:
+            servers = self.nova.servers.list()
+            for hype in hypervisors:
+                for server in servers:
+                    if server.to_dict()['OS-EXT-SRV-ATTR:hypervisor_hostname']==hype['hypervisor_hostname']:
+                        if 'vm' in hype:
+                            hype['vm'].append(server.id)
+                        else:
+                            hype['vm'] = [server.id]
+            return 1, hype_dict
+        except nvExceptions.NotFound as e:
+            error_value=-vimconn.HTTP_Not_Found
+            error_text= (str(e) if len(e.args)==0 else str(e.args[0]))
+        except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
+            error_value=-vimconn.HTTP_Bad_Request
+            error_text= type(e).__name__ + ": "+  (str(e) if len(e.args)==0 else str(e.args[0]))
+        #TODO insert exception vimconn.HTTP_Unauthorized
+        #if reaching here is because an exception
+        self.logger.debug("get_hosts " + error_text)
+        return error_value, error_text
+
    def new_classification(self, name, ctype, definition):
        """Create a new (Traffic) Classification in the VIM as a neutron SFC flow classifier.

        Params:
            name: name of the classification
            ctype: classification type; must be one of supportedClassificationTypes
            definition: dict with the classification definition, sent to
                neutron as the flow_classifier body (with 'name' added)
        Returns the VIM id of the created flow classifier
        Raises vimconnNotSupportedException for an unsupported ctype,
        vimconnException for an invalid definition, or the exception
        mapped by _format_exception on VIM/connection errors
        """
        self.logger.debug('Adding a new (Traffic) Classification to VIM, named %s', name)
        try:
            new_class = None
            self._reload_connection()
            if ctype not in supportedClassificationTypes:
                raise vimconn.vimconnNotSupportedException(
                        'OpenStack VIM connector doesn\'t support provided '
                        'Classification Type {}, supported ones are: '
                        '{}'.format(ctype, supportedClassificationTypes))
            if not self._validate_classification(ctype, definition):
                raise vimconn.vimconnException(
                    'Incorrect Classification definition '
                    'for the type specified.')
            # the flow classifier body is the definition itself plus the name
            classification_dict = definition
            classification_dict['name'] = name

            new_class = self.neutron.create_sfc_flow_classifier(
                {'flow_classifier': classification_dict})
            return new_class['flow_classifier']['id']
        except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
                neExceptions.NeutronException, ConnectionError) as e:
            self.logger.error(
                'Creation of Classification failed.')
            self._format_exception(e)
+
+    def get_classification(self, class_id):
+        self.logger.debug(" Getting Classification %s from VIM", class_id)
+        filter_dict = {"id": class_id}
+        class_list = self.get_classification_list(filter_dict)
+        if len(class_list) == 0:
+            raise vimconn.vimconnNotFoundException(
+                "Classification '{}' not found".format(class_id))
+        elif len(class_list) > 1:
+            raise vimconn.vimconnConflictException(
+                "Found more than one Classification with this criteria")
+        classification = class_list[0]
+        return classification
+
+    def get_classification_list(self, filter_dict={}):
+        self.logger.debug("Getting Classifications from VIM filter: '%s'",
+                          str(filter_dict))
+        try:
+            filter_dict_os = filter_dict.copy()
+            self._reload_connection()
+            if self.api_version3 and "tenant_id" in filter_dict_os:
+                filter_dict_os['project_id'] = filter_dict_os.pop('tenant_id')
+            classification_dict = self.neutron.list_sfc_flow_classifiers(
+                **filter_dict_os)
+            classification_list = classification_dict["flow_classifiers"]
+            self.__classification_os2mano(classification_list)
+            return classification_list
+        except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
+                neExceptions.NeutronException, ConnectionError) as e:
+            self._format_exception(e)
+
+    def delete_classification(self, class_id):
+        self.logger.debug("Deleting Classification '%s' from VIM", class_id)
+        try:
+            self._reload_connection()
+            self.neutron.delete_sfc_flow_classifier(class_id)
+            return class_id
+        except (neExceptions.ConnectionFailed, neExceptions.NeutronException,
+                ksExceptions.ClientException, neExceptions.NeutronException,
+                ConnectionError) as e:
+            self._format_exception(e)
+
    def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
        """Create a Service Function Instance (networking-sfc port pair) in the VIM.

        :param name: SFI name
        :param ingress_ports: list with exactly one ingress port uuid
        :param egress_ports: list with exactly one egress port uuid
        :param sfc_encap: when True use 'nsh' correlation; otherwise none
        :returns: uuid of the created port pair
        :raises vimconnNotSupportedException: if not exactly one ingress and
            one egress port is given
        """
        self.logger.debug("Adding a new Service Function Instance to VIM, named '%s'", name)
        try:
            new_sfi = None
            self._reload_connection()
            correlation = None
            if sfc_encap:
                correlation = 'nsh'
            # networking-sfc port pairs support a single ingress/egress port
            if len(ingress_ports) != 1:
                raise vimconn.vimconnNotSupportedException(
                    "OpenStack VIM connector can only have "
                    "1 ingress port per SFI")
            if len(egress_ports) != 1:
                raise vimconn.vimconnNotSupportedException(
                    "OpenStack VIM connector can only have "
                    "1 egress port per SFI")
            sfi_dict = {'name': name,
                        'ingress': ingress_ports[0],
                        'egress': egress_ports[0],
                        'service_function_parameters': {
                            'correlation': correlation}}
            new_sfi = self.neutron.create_sfc_port_pair({'port_pair': sfi_dict})
            return new_sfi['port_pair']['id']
        except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
                neExceptions.NeutronException, ConnectionError) as e:
            # NOTE(review): new_sfi is only assigned by the create call right
            # before the return, so it appears it can never be non-None when an
            # exception reaches this handler; confirm whether this rollback
            # branch is reachable before relying on it.
            if new_sfi:
                try:
                    self.neutron.delete_sfc_port_pair(
                        new_sfi['port_pair']['id'])
                except Exception:
                    self.logger.error(
                        'Creation of Service Function Instance failed, with '
                        'subsequent deletion failure as well.')
            self._format_exception(e)
+
+    def get_sfi(self, sfi_id):
+        self.logger.debug('Getting Service Function Instance %s from VIM', sfi_id)
+        filter_dict = {"id": sfi_id}
+        sfi_list = self.get_sfi_list(filter_dict)
+        if len(sfi_list) == 0:
+            raise vimconn.vimconnNotFoundException("Service Function Instance '{}' not found".format(sfi_id))
+        elif len(sfi_list) > 1:
+            raise vimconn.vimconnConflictException(
+                'Found more than one Service Function Instance '
+                'with this criteria')
+        sfi = sfi_list[0]
+        return sfi
+
+    def get_sfi_list(self, filter_dict={}):
+        self.logger.debug("Getting Service Function Instances from VIM filter: '%s'", str(filter_dict))
+        try:
+            self._reload_connection()
+            filter_dict_os = filter_dict.copy()
+            if self.api_version3 and "tenant_id" in filter_dict_os:
+                filter_dict_os['project_id'] = filter_dict_os.pop('tenant_id')
+            sfi_dict = self.neutron.list_sfc_port_pairs(**filter_dict_os)
+            sfi_list = sfi_dict["port_pairs"]
+            self.__sfi_os2mano(sfi_list)
+            return sfi_list
+        except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
+                neExceptions.NeutronException, ConnectionError) as e:
+            self._format_exception(e)
+
+    def delete_sfi(self, sfi_id):
+        self.logger.debug("Deleting Service Function Instance '%s' "
+                          "from VIM", sfi_id)
+        try:
+            self._reload_connection()
+            self.neutron.delete_sfc_port_pair(sfi_id)
+            return sfi_id
+        except (neExceptions.ConnectionFailed, neExceptions.NeutronException,
+                ksExceptions.ClientException, neExceptions.NeutronException,
+                ConnectionError) as e:
+            self._format_exception(e)
+
    def new_sf(self, name, sfis, sfc_encap=True):
        """Create a Service Function (networking-sfc port pair group) in the VIM.

        :param name: SF name
        :param sfis: list of SFI (port pair) uuids to group
        :param sfc_encap: expected SFC encapsulation setting of every SFI
        :returns: uuid of the created port pair group
        :raises vimconnNotSupportedException: when an SFI's encapsulation
            differs from sfc_encap
        """
        self.logger.debug("Adding a new Service Function to VIM, named '%s'", name)
        try:
            new_sf = None
            self._reload_connection()
            # correlation = None
            # if sfc_encap:
            #     correlation = 'nsh'
            # all grouped SFIs must agree on the SFC encapsulation flag;
            # NOTE(review): this compares the VIM-reported 'sfc_encap' value to
            # the boolean parameter -- confirm both use the same representation.
            for instance in sfis:
                sfi = self.get_sfi(instance)
                if sfi.get('sfc_encap') != sfc_encap:
                    raise vimconn.vimconnNotSupportedException(
                        "OpenStack VIM connector requires all SFIs of the "
                        "same SF to share the same SFC Encapsulation")
            sf_dict = {'name': name,
                       'port_pairs': sfis}
            new_sf = self.neutron.create_sfc_port_pair_group({
                'port_pair_group': sf_dict})
            return new_sf['port_pair_group']['id']
        except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
                neExceptions.NeutronException, ConnectionError) as e:
            # NOTE(review): new_sf is assigned immediately before the return, so
            # this rollback branch looks unreachable -- confirm before relying on it.
            if new_sf:
                try:
                    self.neutron.delete_sfc_port_pair_group(
                        new_sf['port_pair_group']['id'])
                except Exception:
                    self.logger.error(
                        'Creation of Service Function failed, with '
                        'subsequent deletion failure as well.')
            self._format_exception(e)
+
+    def get_sf(self, sf_id):
+        self.logger.debug("Getting Service Function %s from VIM", sf_id)
+        filter_dict = {"id": sf_id}
+        sf_list = self.get_sf_list(filter_dict)
+        if len(sf_list) == 0:
+            raise vimconn.vimconnNotFoundException(
+                "Service Function '{}' not found".format(sf_id))
+        elif len(sf_list) > 1:
+            raise vimconn.vimconnConflictException(
+                "Found more than one Service Function with this criteria")
+        sf = sf_list[0]
+        return sf
+
+    def get_sf_list(self, filter_dict={}):
+        self.logger.debug("Getting Service Function from VIM filter: '%s'",
+                          str(filter_dict))
+        try:
+            self._reload_connection()
+            filter_dict_os = filter_dict.copy()
+            if self.api_version3 and "tenant_id" in filter_dict_os:
+                filter_dict_os['project_id'] = filter_dict_os.pop('tenant_id')
+            sf_dict = self.neutron.list_sfc_port_pair_groups(**filter_dict_os)
+            sf_list = sf_dict["port_pair_groups"]
+            self.__sf_os2mano(sf_list)
+            return sf_list
+        except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
+                neExceptions.NeutronException, ConnectionError) as e:
+            self._format_exception(e)
+
+    def delete_sf(self, sf_id):
+        self.logger.debug("Deleting Service Function '%s' from VIM", sf_id)
+        try:
+            self._reload_connection()
+            self.neutron.delete_sfc_port_pair_group(sf_id)
+            return sf_id
+        except (neExceptions.ConnectionFailed, neExceptions.NeutronException,
+                ksExceptions.ClientException, neExceptions.NeutronException,
+                ConnectionError) as e:
+            self._format_exception(e)
+
    def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
        """Create a Service Function Path (networking-sfc port chain) in the VIM.

        :param name: SFP name
        :param classifications: list of flow classifier uuids
        :param sfs: ordered list of port pair group uuids
        :param sfc_encap: True -> 'nsh' correlation, False -> legacy 'mpls'
        :param spi: optional explicit chain id (Service Path Identifier)
        :returns: uuid of the created port chain
        """
        self.logger.debug("Adding a new Service Function Path to VIM, named '%s'", name)
        try:
            new_sfp = None
            self._reload_connection()
            # In networking-sfc the MPLS encapsulation is legacy
            # should be used when no full SFC Encapsulation is intended
            correlation = 'mpls'
            if sfc_encap:
                correlation = 'nsh'
            sfp_dict = {'name': name,
                        'flow_classifiers': classifications,
                        'port_pair_groups': sfs,
                        'chain_parameters': {'correlation': correlation}}
            if spi:
                sfp_dict['chain_id'] = spi
            new_sfp = self.neutron.create_sfc_port_chain({'port_chain': sfp_dict})
            return new_sfp["port_chain"]["id"]
        except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
                neExceptions.NeutronException, ConnectionError) as e:
            # NOTE(review): new_sfp is assigned immediately before the return, so
            # this rollback branch looks unreachable -- confirm before relying on it.
            if new_sfp:
                try:
                    self.neutron.delete_sfc_port_chain(new_sfp['port_chain']['id'])
                except Exception:
                    self.logger.error(
                        'Creation of Service Function Path failed, with '
                        'subsequent deletion failure as well.')
            self._format_exception(e)
+
+    def get_sfp(self, sfp_id):
+        self.logger.debug(" Getting Service Function Path %s from VIM", sfp_id)
+        filter_dict = {"id": sfp_id}
+        sfp_list = self.get_sfp_list(filter_dict)
+        if len(sfp_list) == 0:
+            raise vimconn.vimconnNotFoundException(
+                "Service Function Path '{}' not found".format(sfp_id))
+        elif len(sfp_list) > 1:
+            raise vimconn.vimconnConflictException(
+                "Found more than one Service Function Path with this criteria")
+        sfp = sfp_list[0]
+        return sfp
+
+    def get_sfp_list(self, filter_dict={}):
+        self.logger.debug("Getting Service Function Paths from VIM filter: '%s'", str(filter_dict))
+        try:
+            self._reload_connection()
+            filter_dict_os = filter_dict.copy()
+            if self.api_version3 and "tenant_id" in filter_dict_os:
+                filter_dict_os['project_id'] = filter_dict_os.pop('tenant_id')
+            sfp_dict = self.neutron.list_sfc_port_chains(**filter_dict_os)
+            sfp_list = sfp_dict["port_chains"]
+            self.__sfp_os2mano(sfp_list)
+            return sfp_list
+        except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
+                neExceptions.NeutronException, ConnectionError) as e:
+            self._format_exception(e)
+
+    def delete_sfp(self, sfp_id):
+        self.logger.debug("Deleting Service Function Path '%s' from VIM", sfp_id)
+        try:
+            self._reload_connection()
+            self.neutron.delete_sfc_port_chain(sfp_id)
+            return sfp_id
+        except (neExceptions.ConnectionFailed, neExceptions.NeutronException,
+                ksExceptions.ClientException, neExceptions.NeutronException,
+                ConnectionError) as e:
+            self._format_exception(e)
diff --git a/RO-VIM-openstack/requirements.txt b/RO-VIM-openstack/requirements.txt
new file mode 100644 (file)
index 0000000..e0cb62f
--- /dev/null
@@ -0,0 +1,27 @@
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+PyYAML
+python-openstackclient
+python-neutronclient
+requests
+netaddr
+#TODO py3 networking-l2gw
+#TODO py3 python-novaclient
+#TODO py3 python-keystoneclient
+#TODO py3 python-glanceclient
+#TODO py3 python-cinderclient
+git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro
+
diff --git a/RO-VIM-openstack/setup.py b/RO-VIM-openstack/setup.py
new file mode 100644 (file)
index 0000000..1b3deba
--- /dev/null
@@ -0,0 +1,58 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from setuptools import setup
+
_name = "osm_rovim_openstack"

# Long description in reStructuredText. Fix: the section over/underlines must
# be at least as long as the title, otherwise reST rendering (e.g. on PyPI)
# reports an error; also fixed the "pluging" typo.
README = """
===================
osm-rovim_openstack
===================

osm-ro plugin for openstack VIM
"""

setup(
    name=_name,
    description='OSM ro vim plugin for openstack',
    long_description=README,
    version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'),
    # version=VERSION,
    # python_requires='>3.5.0',
    author='ETSI OSM',
    author_email='alfonso.tiernosepulveda@telefonica.com',
    maintainer='Alfonso Tierno',
    maintainer_email='alfonso.tiernosepulveda@telefonica.com',
    url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary',
    license='Apache 2.0',

    packages=[_name],
    include_package_data=True,
    dependency_links=["git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro"],
    install_requires=[
        "python-openstackclient", "python-neutronclient",
        "requests", "netaddr", "PyYAML",
        "osm-ro",  # TODO py3 "networking-l2gw"
        # "python-novaclient", "python-keystoneclient", "python-glanceclient", "python-cinderclient",
    ],
    setup_requires=['setuptools-version-command'],
    entry_points={
        'osm_rovim.plugins': ['rovim_openstack = osm_rovim_openstack.vimconn_openstack'],
    },
)
diff --git a/RO-VIM-openstack/stdeb.cfg b/RO-VIM-openstack/stdeb.cfg
new file mode 100644 (file)
index 0000000..ef5f94d
--- /dev/null
@@ -0,0 +1,20 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+[DEFAULT]
+X-Python3-Version : >= 3.5
+Depends3: python3-openstackclient, python3-neutronclient, python3-requests, python3-netaddr, python3-yaml,
+          python3-osm-ro, python3-pip
+          # TODO py3 python3-networking-l2gw
diff --git a/RO-VIM-openstack/tox.ini b/RO-VIM-openstack/tox.ini
new file mode 100644 (file)
index 0000000..2a67756
--- /dev/null
@@ -0,0 +1,41 @@
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+[tox]
+envlist = py3
+toxworkdir={homedir}/.tox
+
+[testenv]
+basepython = python3
+install_command = python3 -m pip install -r requirements.txt -U {opts} {packages}
+# deps = -r{toxinidir}/test-requirements.txt
+commands=python3 -m unittest discover -v
+
+[testenv:flake8]
+basepython = python3
+deps = flake8
+commands = flake8 osm_rovim_openstack --max-line-length 120 \
+    --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504
+
+[testenv:unittest]
+basepython = python3
+commands = python3 -m unittest osm_rovim_openstack.tests
+
+[testenv:build]
+basepython = python3
+deps = stdeb
+       setuptools-version-command
+commands = python3 setup.py --command-packages=stdeb.command bdist_deb
+
diff --git a/RO-VIM-openvim/Makefile b/RO-VIM-openvim/Makefile
new file mode 100644 (file)
index 0000000..8c68832
--- /dev/null
@@ -0,0 +1,24 @@
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+all: clean package
+
+clean:
+       rm -rf dist deb_dist osm_rovim_openvim-*.tar.gz osm_rovim_openvim.egg-info .eggs
+
+package:
+       python3 setup.py --command-packages=stdeb.command sdist_dsc
+       cd deb_dist/osm-rovim-openvim*/ && dpkg-buildpackage -rfakeroot -uc -us
+
diff --git a/RO-VIM-openvim/osm_rovim_openvim/vimconn_openvim.py b/RO-VIM-openvim/osm_rovim_openvim/vimconn_openvim.py
new file mode 100644 (file)
index 0000000..c89a303
--- /dev/null
@@ -0,0 +1,1381 @@
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
'''
vimconnector implements all the methods to interact with openvim using the openvim API.
'''
# Attribution metadata kept from the original python2 implementation.
__author__="Alfonso Tierno, Gerardo Garcia"
__date__ ="$26-aug-2014 11:09:29$"
+
+from osm_ro import vimconn
+import requests
+import json
+import yaml
+import logging
+import math
+from osm_ro.openmano_schemas import id_schema, name_schema, nameshort_schema, description_schema, \
+                            vlan1000_schema, integer0_schema
+from jsonschema import validate as js_v, exceptions as js_e
+from urllib.parse import quote
+
'''contain the openvim virtual machine status to openmano status'''
# Map: openvim VM status -> openmano (MANO) status.
vmStatus2manoFormat={'ACTIVE':'ACTIVE',
                     'PAUSED':'PAUSED',
                     'SUSPENDED': 'SUSPENDED',
                     'INACTIVE':'INACTIVE',
                     'CREATING':'BUILD',
                     'ERROR':'ERROR','DELETED':'DELETED'
                     }
# Map: openvim network status -> openmano (MANO) status.
netStatus2manoFormat={'ACTIVE':'ACTIVE','INACTIVE':'INACTIVE','BUILD':'BUILD','ERROR':'ERROR','DELETED':'DELETED', 'DOWN':'DOWN'
                     }
+
+
+host_schema = {
+    "type":"object",
+    "properties":{
+        "id": id_schema,
+        "name": name_schema,
+    },
+    "required": ["id"]
+}
+image_schema = {
+    "type":"object",
+    "properties":{
+        "id": id_schema,
+        "name": name_schema,
+    },
+    "required": ["id","name"]
+}
+server_schema = {
+    "type":"object",
+    "properties":{
+        "id":id_schema,
+        "name": name_schema,
+    },
+    "required": ["id","name"]
+}
+new_host_response_schema = {
+    "title":"host response information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "host": host_schema
+    },
+    "required": ["host"],
+    "additionalProperties": False
+}
+
+get_images_response_schema = {
+    "title":"openvim images response information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "images":{
+            "type":"array",
+            "items": image_schema,
+        }
+    },
+    "required": ["images"],
+    "additionalProperties": False
+}
+
+get_hosts_response_schema = {
+    "title":"openvim hosts response information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "hosts":{
+            "type":"array",
+            "items": host_schema,
+        }
+    },
+    "required": ["hosts"],
+    "additionalProperties": False
+}
+
+get_host_detail_response_schema = new_host_response_schema # TODO: Content is not parsed yet
+
+get_server_response_schema = {
+    "title":"openvim server response information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "servers":{
+            "type":"array",
+            "items": server_schema,
+        }
+    },
+    "required": ["servers"],
+    "additionalProperties": False
+}
+
+new_tenant_response_schema = {
+    "title":"tenant response information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "tenant":{
+            "type":"object",
+            "properties":{
+                "id": id_schema,
+                "name": nameshort_schema,
+                "description":description_schema,
+                "enabled":{"type" : "boolean"}
+            },
+            "required": ["id"]
+        }
+    },
+    "required": ["tenant"],
+    "additionalProperties": False
+}
+
+new_network_response_schema = {
+    "title":"network response information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "network":{
+            "type":"object",
+            "properties":{
+                "id":id_schema,
+                "name":name_schema,
+                "type":{"type":"string", "enum":["bridge_man","bridge_data","data", "ptp"]},
+                "shared":{"type":"boolean"},
+                "tenant_id":id_schema,
+                "admin_state_up":{"type":"boolean"},
+                "vlan":vlan1000_schema
+            },
+            "required": ["id"]
+        }
+    },
+    "required": ["network"],
+    "additionalProperties": False
+}
+
+
+# get_network_response_schema = {
+#     "title":"get network response information schema",
+#     "$schema": "http://json-schema.org/draft-04/schema#",
+#     "type":"object",
+#     "properties":{
+#         "network":{
+#             "type":"object",
+#             "properties":{
+#                 "id":id_schema,
+#                 "name":name_schema,
+#                 "type":{"type":"string", "enum":["bridge_man","bridge_data","data", "ptp"]},
+#                 "shared":{"type":"boolean"},
+#                 "tenant_id":id_schema,
+#                 "admin_state_up":{"type":"boolean"},
+#                 "vlan":vlan1000_schema
+#             },
+#             "required": ["id"]
+#         }
+#     },
+#     "required": ["network"],
+#     "additionalProperties": False
+# }
+
+
+new_port_response_schema = {
+    "title":"port response information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "port":{
+            "type":"object",
+            "properties":{
+                "id":id_schema,
+            },
+            "required": ["id"]
+        }
+    },
+    "required": ["port"],
+    "additionalProperties": False
+}
+
+get_flavor_response_schema = {
+    "title":"openvim flavors response information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "flavor":{
+            "type":"object",
+            "properties":{
+                "id":   id_schema,
+                "name": name_schema,
+                "extended": {"type":"object"},
+            },
+            "required": ["id", "name"],
+        }
+    },
+    "required": ["flavor"],
+    "additionalProperties": False
+}
+
+new_flavor_response_schema = {
+    "title":"flavor response information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "flavor":{
+            "type":"object",
+            "properties":{
+                "id":id_schema,
+            },
+            "required": ["id"]
+        }
+    },
+    "required": ["flavor"],
+    "additionalProperties": False
+}
+
# Schema for the openvim "get image" response ({"image": {...}}).
get_image_response_schema = {
    "title":"openvim images response information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type":"object",
    "properties":{
        "image":{
            "type":"object",
            "properties":{
                "id":   id_schema,
                "name": name_schema,
            },
            "required": ["id", "name"],
        }
    },
    # Fix: this schema required "flavor" (copy-paste from the flavor schema)
    # while only declaring "image" and forbidding additional properties,
    # which made it impossible to satisfy.
    "required": ["image"],
    "additionalProperties": False
}
+new_image_response_schema = {
+    "title":"image response information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "image":{
+            "type":"object",
+            "properties":{
+                "id":id_schema,
+            },
+            "required": ["id"]
+        }
+    },
+    "required": ["image"],
+    "additionalProperties": False
+}
+
+new_vminstance_response_schema = {
+    "title":"server response information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "server":{
+            "type":"object",
+            "properties":{
+                "id":id_schema,
+            },
+            "required": ["id"]
+        }
+    },
+    "required": ["server"],
+    "additionalProperties": False
+}
+
# Schema for the openvim processor-rankings response.
get_processor_rankings_response_schema = {
    "title":"processor rankings information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type":"object",
    "properties":{
        "rankings":{
            "type":"array",
            "items":{
                "type":"object",
                "properties":{
                    "model": description_schema,
                    "value": integer0_schema
                },
                "additionalProperties": False,
                "required": ["model","value"]
            }
        }
    },
    # Fix: these two keywords were nested inside "properties", where JSON
    # Schema treats them as property definitions instead of validation
    # keywords, so "rankings" was never actually required.
    "additionalProperties": False,
    "required": ["rankings"]
}
+
+class vimconnector(vimconn.vimconnector):
+    def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None,
+                 log_level="DEBUG", config={}, persistent_info={}):
+        vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level, config)
+        self.tenant = None
+        self.headers_req = {'content-type': 'application/json'}
+        self.logger = logging.getLogger('openmano.vim.openvim')
+        self.persistent_info = persistent_info
+        if tenant_id:
+            self.tenant = tenant_id
+
+    def __setitem__(self,index, value):
+        '''Set individuals parameters 
+        Throw TypeError, KeyError
+        '''
+        if index=='tenant_id':
+            self.tenant = value
+        elif index=='tenant_name':
+            self.tenant = None
+        vimconn.vimconnector.__setitem__(self,index, value)    
+
    def _get_my_tenant(self):
        '''Obtain uuid of my tenant from name.

        The uuid is cached in self.tenant; when not cached it is resolved by
        querying the openvim '/tenants' endpoint filtered by tenant_name.
        Raises vimconn exceptions on HTTP errors or ambiguous/missing tenants.
        '''
        if self.tenant:
            return self.tenant

        url = self.url+'/tenants?name='+ quote(self.tenant_name)
        self.logger.info("Getting VIM tenant_id GET %s", url)
        vim_response = requests.get(url, headers = self.headers_req)
        self._check_http_request_response(vim_response)
        try:
            tenant_list = vim_response.json()["tenants"]
            if len(tenant_list) == 0:
                raise vimconn.vimconnNotFoundException("No tenant found for name '{}'".format(self.tenant_name))
            elif len(tenant_list) > 1:
                raise vimconn.vimconnConflictException ("More that one tenant found for name '{}'".format(self.tenant_name))
            self.tenant = tenant_list[0]["id"]
            return self.tenant
        # NOTE(review): this broad handler also re-wraps the NotFound/Conflict
        # exceptions raised just above into vimconnUnexpectedResponse -- confirm
        # whether that masking is intended.
        except Exception as e:
            raise vimconn.vimconnUnexpectedResponse("Get VIM tenant {} '{}'".format(type(e).__name__, str(e)))
+
+    def _format_jsonerror(self,http_response):
+        #DEPRECATED, to delete in the future
+        try:
+            data = http_response.json()
+            return data["error"]["description"]
+        except:
+            return http_response.text
+
+    def _format_in(self, http_response, schema):
+        #DEPRECATED, to delete in the future
+        try:
+            client_data = http_response.json()
+            js_v(client_data, schema)
+            #print "Input data: ", str(client_data)
+            return True, client_data
+        except js_e.ValidationError as exc:
+            print("validate_in error, jsonschema exception ", exc.message, "at", exc.path)
+            return False, ("validate_in error, jsonschema exception ", exc.message, "at", exc.path)
+    
+    def _remove_extra_items(self, data, schema):
+        deleted=[]
+        if type(data) is tuple or type(data) is list:
+            for d in data:
+                a= self._remove_extra_items(d, schema['items'])
+                if a is not None: deleted.append(a)
+        elif type(data) is dict:
+            for k in data.keys():
+                if 'properties' not in schema or k not in schema['properties'].keys():
+                    del data[k]
+                    deleted.append(k)
+                else:
+                    a = self._remove_extra_items(data[k], schema['properties'][k])
+                    if a is not None:  deleted.append({k:a})
+        if len(deleted) == 0: return None
+        elif len(deleted) == 1: return deleted[0]
+        else: return deleted
+        
+    def _format_request_exception(self, request_exception):
+        '''Transform a request exception into a vimconn exception'''
+        if isinstance(request_exception, js_e.ValidationError):
+            raise vimconn.vimconnUnexpectedResponse("jsonschema exception '{}' at '{}'".format(request_exception.message, request_exception.path))            
+        elif isinstance(request_exception, requests.exceptions.HTTPError):
+            raise vimconn.vimconnUnexpectedResponse(type(request_exception).__name__ + ": " + str(request_exception))
+        else:
+            raise vimconn.vimconnConnectionException(type(request_exception).__name__ + ": " + str(request_exception))
+
+    def _check_http_request_response(self, request_response):
+        '''Raise a vimconn exception if the response is not Ok'''
+        if request_response.status_code >= 200 and  request_response.status_code < 300:
+            return
+        if request_response.status_code == vimconn.HTTP_Unauthorized:
+            raise vimconn.vimconnAuthException(request_response.text)
+        elif request_response.status_code == vimconn.HTTP_Not_Found:
+            raise vimconn.vimconnNotFoundException(request_response.text)
+        elif request_response.status_code == vimconn.HTTP_Conflict:
+            raise vimconn.vimconnConflictException(request_response.text)
+        else: 
+            raise vimconn.vimconnUnexpectedResponse("VIM HTTP_response {}, {}".format(request_response.status_code, str(request_response.text)))
+
+    def new_tenant(self,tenant_name,tenant_description):
+        '''Adds a new tenant to VIM with this name and description, returns the tenant identifier'''
+        #print "VIMConnector: Adding a new tenant to VIM"
+        payload_dict = {"tenant": {"name":tenant_name,"description": tenant_description, "enabled": True}}
+        payload_req = json.dumps(payload_dict)
+        try:
+            url = self.url_admin+'/tenants'
+            self.logger.info("Adding a new tenant %s", url)
+            vim_response = requests.post(url, headers = self.headers_req, data=payload_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            js_v(response, new_tenant_response_schema)
+            #r = self._remove_extra_items(response, new_tenant_response_schema)
+            #if r is not None: 
+            #    self.logger.warn("Warning: remove extra items %s", str(r))
+            tenant_id = response['tenant']['id']
+            return tenant_id
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+
+    def delete_tenant(self,tenant_id):
+        '''Delete a tenant from VIM. Returns the old tenant identifier'''
+        try:
+            url = self.url_admin+'/tenants/'+tenant_id
+            self.logger.info("Delete a tenant DELETE %s", url)
+            vim_response = requests.delete(url, headers = self.headers_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            return tenant_id
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+
+    def get_tenant_list(self, filter_dict={}):
+        '''Obtain tenants of VIM
+        filter_dict can contain the following keys:
+            name: filter by tenant name
+            id: filter by tenant uuid/id
+            <other VIM specific>
+        Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
+        '''
+        filterquery=[]
+        filterquery_text=''
+        for k,v in filter_dict.items():
+            filterquery.append(str(k)+'='+str(v))
+        if len(filterquery)>0:
+            filterquery_text='?'+ '&'.join(filterquery)
+        try:
+            url = self.url+'/tenants'+filterquery_text
+            self.logger.info("get_tenant_list GET %s", url)
+            vim_response = requests.get(url, headers = self.headers_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            return vim_response.json()["tenants"]
+        except requests.exceptions.RequestException as e:
+            self._format_request_exception(e)
+
+    def new_network(self,net_name, net_type, ip_profile=None, shared=False, vlan=None): #, **vim_specific):
+        """Adds a tenant network to VIM
+        Params:
+            'net_name': name of the network
+            'net_type': one of:
+                'bridge': overlay isolated network
+                'data':   underlay E-LAN network for Passthrough and SRIOV interfaces
+                'ptp':    underlay E-LINE network for Passthrough and SRIOV interfaces.
+            'ip_profile': is a dict containing the IP parameters of the network
+                'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
+                'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
+                'gateway_address': (Optional) ip_schema, that is X.X.X.X
+                'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
+                'dhcp_enabled': True or False
+                'dhcp_start_address': ip_schema, first IP to grant
+                'dhcp_count': number of IPs to grant.
+            'shared': if this network can be seen/use by other tenants/organization
+            'vlan': in case of a data or ptp net_type, the intended vlan tag to be used for the network
+        Returns a tuple with the network identifier and created_items, or raises an exception on error
+            created_items can be None or a dictionary where this method can include key-values that will be passed to
+            the method delete_network. Can be used to store created segments, created l2gw connections, etc.
+            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+            as not present.
+        """
+        try:
+            created_items = {}
+            self._get_my_tenant()
+            if net_type=="bridge":
+                net_type="bridge_data"
+            payload_req = {"name": net_name, "type": net_type, "tenant_id": self.tenant, "shared": shared}
+            if vlan:
+                payload_req["provider:vlan"] = vlan
+            # payload_req.update(vim_specific)
+            url = self.url+'/networks'
+            self.logger.info("Adding a new network POST: %s  DATA: %s", url, str(payload_req))
+            vim_response = requests.post(url, headers = self.headers_req, data=json.dumps({"network": payload_req}) )
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            js_v(response, new_network_response_schema)
+            #r = self._remove_extra_items(response, new_network_response_schema)
+            #if r is not None: 
+            #    self.logger.warn("Warning: remove extra items %s", str(r))
+            network_id = response['network']['id']
+            return network_id, created_items
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+        
+    def get_network_list(self, filter_dict={}):
+        '''Obtain tenant networks of VIM
+        Filter_dict can be:
+            name: network name
+            id: network uuid
+            public: boolean
+            tenant_id: tenant
+            admin_state_up: boolean
+            status: 'ACTIVE'
+        Returns the network list of dictionaries
+        '''
+        try:
+            if 'tenant_id' not in filter_dict:
+                filter_dict["tenant_id"] = self._get_my_tenant()
+            elif not filter_dict["tenant_id"]:
+                del filter_dict["tenant_id"]
+            filterquery=[]
+            filterquery_text=''
+            for k,v in filter_dict.items():
+                filterquery.append(str(k)+'='+str(v))
+            if len(filterquery)>0:
+                filterquery_text='?'+ '&'.join(filterquery)
+            url = self.url+'/networks'+filterquery_text
+            self.logger.info("Getting network list GET %s", url)
+            vim_response = requests.get(url, headers = self.headers_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            return response['networks']
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+
+    def get_network(self, net_id):
+        '''Obtain network details of network id'''
+        try:
+            url = self.url+'/networks/'+net_id
+            self.logger.info("Getting network GET %s", url)
+            vim_response = requests.get(url, headers = self.headers_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            return response['network']
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+            
+    def delete_network(self, net_id, created_items=None):
+        """
+        Removes a tenant network from VIM and its associated elements
+        :param net_id: VIM identifier of the network, provided by method new_network
+        :param created_items: dictionary with extra items to be deleted. provided by method new_network
+        Returns the network identifier or raises an exception upon error or when network is not found
+        """
+        try:
+            self._get_my_tenant()
+            url = self.url+'/networks/'+net_id
+            self.logger.info("Deleting VIM network DELETE %s", url)
+            vim_response = requests.delete(url, headers=self.headers_req)
+            self._check_http_request_response(vim_response)
+            #self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            return net_id
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+
+    def get_flavor(self, flavor_id):
+        '''Obtain flavor details from the  VIM'''
+        try:
+            self._get_my_tenant()
+            url = self.url+'/'+self.tenant+'/flavors/'+flavor_id
+            self.logger.info("Getting flavor GET %s", url)
+            vim_response = requests.get(url, headers = self.headers_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            js_v(response, get_flavor_response_schema)
+            r = self._remove_extra_items(response, get_flavor_response_schema)
+            if r is not None: 
+                self.logger.warn("Warning: remove extra items %s", str(r))
+            return response['flavor']
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+        
+    def new_flavor(self, flavor_data):
+        '''Adds a tenant flavor to VIM'''
+        '''Returns the flavor identifier'''
+        try:
+            new_flavor_dict = flavor_data.copy()
+            for device in new_flavor_dict.get('extended', {}).get('devices', ()):
+                if 'image name' in device:
+                    del device['image name']
+                if 'name' in device:
+                    del device['name']
+            numas = new_flavor_dict.get('extended', {}).get('numas')
+            if numas:
+                numa = numas[0]
+                # translate memory, cpus to EPA
+                if "cores" not in numa and "threads" not in numa and "paired-threads" not in numa:
+                    numa["paired-threads"] = new_flavor_dict["vcpus"]
+                if "memory" not in numa:
+                    numa["memory"] = int(math.ceil(new_flavor_dict["ram"] / 1024.0))
+                for iface in numa.get("interfaces", ()):
+                    if not iface.get("bandwidth"):
+                        iface["bandwidth"] = "1 Mbps"
+
+            new_flavor_dict["name"] = flavor_data["name"][:64]
+            self._get_my_tenant()
+            payload_req = json.dumps({'flavor': new_flavor_dict})
+            url = self.url+'/'+self.tenant+'/flavors'
+            self.logger.info("Adding a new VIM flavor POST %s", url)
+            vim_response = requests.post(url, headers = self.headers_req, data=payload_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            js_v(response, new_flavor_response_schema)
+            r = self._remove_extra_items(response, new_flavor_response_schema)
+            if r is not None: 
+                self.logger.warn("Warning: remove extra items %s", str(r))
+            flavor_id = response['flavor']['id']
+            return flavor_id
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+
+    def delete_flavor(self,flavor_id):
+        '''Deletes a tenant flavor from VIM'''
+        '''Returns the old flavor_id'''
+        try:
+            self._get_my_tenant()
+            url = self.url+'/'+self.tenant+'/flavors/'+flavor_id
+            self.logger.info("Deleting VIM flavor DELETE %s", url)
+            vim_response = requests.delete(url, headers=self.headers_req)
+            self._check_http_request_response(vim_response)
+            #self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            return flavor_id
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+
+    def get_image(self, image_id):
+        '''Obtain image details from the  VIM'''
+        try:
+            self._get_my_tenant()
+            url = self.url+'/'+self.tenant+'/images/'+image_id
+            self.logger.info("Getting image GET %s", url)
+            vim_response = requests.get(url, headers = self.headers_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            js_v(response, get_image_response_schema)
+            r = self._remove_extra_items(response, get_image_response_schema)
+            if r is not None: 
+                self.logger.warn("Warning: remove extra items %s", str(r))
+            return response['image']
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+
+    def new_image(self,image_dict):
+        ''' Adds a tenant image to VIM, returns image_id'''
+        try:
+            self._get_my_tenant()
+            new_image_dict={'name': image_dict['name'][:64]}
+            if image_dict.get('description'):
+                new_image_dict['description'] = image_dict['description']
+            if image_dict.get('metadata'):
+                new_image_dict['metadata'] = yaml.load(image_dict['metadata'], Loader=yaml.SafeLoader)
+            if image_dict.get('location'):
+                new_image_dict['path'] = image_dict['location']
+            payload_req = json.dumps({"image":new_image_dict})
+            url=self.url + '/' + self.tenant + '/images'
+            self.logger.info("Adding a new VIM image POST %s", url)
+            vim_response = requests.post(url, headers = self.headers_req, data=payload_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            js_v(response, new_image_response_schema)
+            r = self._remove_extra_items(response, new_image_response_schema)
+            if r is not None: 
+                self.logger.warn("Warning: remove extra items %s", str(r))
+            image_id = response['image']['id']
+            return image_id
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+            
+    def delete_image(self, image_id):
+        '''Deletes a tenant image from VIM'''
+        '''Returns the deleted image_id'''
+        try:
+            self._get_my_tenant()
+            url = self.url + '/'+ self.tenant +'/images/'+image_id
+            self.logger.info("Deleting VIM image DELETE %s", url)
+            vim_response = requests.delete(url, headers=self.headers_req)
+            self._check_http_request_response(vim_response)
+            #self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            return image_id
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+
+    def get_image_id_from_path(self, path):
+        '''Get the image id from image path in the VIM database. Returns the image_id'''
+        try:
+            self._get_my_tenant()
+            url=self.url + '/' + self.tenant + '/images?path='+quote(path)
+            self.logger.info("Getting images GET %s", url)
+            vim_response = requests.get(url)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            js_v(response, get_images_response_schema)
+            #r = self._remove_extra_items(response, get_images_response_schema)
+            #if r is not None: 
+            #    self.logger.warn("Warning: remove extra items %s", str(r))
+            if len(response['images'])==0:
+                raise vimconn.vimconnNotFoundException("Image not found at VIM with path '{}'".format(path))
+            elif len(response['images'])>1:
+                raise vimconn.vimconnConflictException("More than one image found at VIM with path '{}'".format(path))
+            return response['images'][0]['id']
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+
+    def get_image_list(self, filter_dict={}):
+        '''Obtain tenant images from VIM
+        Filter_dict can be:
+            name: image name
+            id: image uuid
+            checksum: image checksum
+            location: image path
+        Returns the image list of dictionaries:
+            [{<the fields at Filter_dict plus some VIM specific>}, ...]
+            List can be empty
+        '''
+        try:
+            self._get_my_tenant()
+            filterquery=[]
+            filterquery_text=''
+            for k,v in filter_dict.items():
+                filterquery.append(str(k)+'='+str(v))
+            if len(filterquery)>0:
+                filterquery_text='?'+ '&'.join(filterquery)
+            url = self.url+'/'+self.tenant+'/images'+filterquery_text
+            self.logger.info("Getting image list GET %s", url)
+            vim_response = requests.get(url, headers = self.headers_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            return response['images']
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+
    def new_vminstancefromJSON(self, vm_data):
        '''Adds a VM instance to VIM'''
        '''Returns the instance identifier'''
        # Legacy method: unlike the rest of this class, it does not raise vimconn
        # exceptions; it returns (negative HTTP code, error text) on failure and
        # (status_code, vminstance_id) on success, and reports via print().
        try:
            self._get_my_tenant()
        except Exception as e:
            return -vimconn.HTTP_Not_Found, str(e)
        print("VIMConnector: Adding a new VM instance from JSON to VIM")
        # vm_data is assumed to be an already-serialized JSON request body -- TODO confirm
        payload_req = vm_data
        try:
            vim_response = requests.post(self.url+'/'+self.tenant+'/servers', headers = self.headers_req, data=payload_req)
        except requests.exceptions.RequestException as e:
            print( "new_vminstancefromJSON Exception: ", e.args)
            return -vimconn.HTTP_Not_Found, str(e.args[0])
        # print vim_response
        #print vim_response.status_code
        if vim_response.status_code == 200:
            #print vim_response.json()
            #print json.dumps(vim_response.json(), indent=4)
            # NOTE(review): the response is validated against new_image_response_schema,
            # not a server schema -- looks copy-pasted; confirm the intended schema
            res,http_content = self._format_in(vim_response, new_image_response_schema)
            #print http_content
            if res:
                r = self._remove_extra_items(http_content, new_image_response_schema)
                if r is not None: print("Warning: remove extra items ", r)
                #print http_content
                vminstance_id = http_content['server']['id']
                print( "Tenant image id: ",vminstance_id)
                return vim_response.status_code,vminstance_id
            else: return -vimconn.HTTP_Bad_Request,http_content
        else:
            #print vim_response.text
            jsonerror = self._format_jsonerror(vim_response)
            text = 'Error in VIM "{}": not possible to add new vm instance. HTTP Response: {}. Error: {}'.format(
                self.url, vim_response.status_code, jsonerror)
            #print text
            return -vim_response.status_code,text
+
    def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None, disk_list=None,
                       availability_zone_index=None, availability_zone_list=None):
        """Adds a VM instance to VIM
        Params:
            start: indicates if VM must start or boot in pause mode. Ignored
            image_id,flavor_id: image and flavor uuid
            net_list: list of interfaces, each one is a dictionary with:
                name:
                net_id: network uuid to connect
                vpci: virtual vcpi to assign
                model: interface model, virtio, e1000, ...
                mac_address: 
                use: 'data', 'bridge',  'mgmt'
                type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
                vim_id: filled/added by this function
                #TODO ip, security groups
        Returns a tuple with the instance identifier and created_items or raises an exception on error
            created_items can be None or a dictionary where this method can include key-values that will be passed to
            the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
            as not present.
        """
        self.logger.debug("new_vminstance input: image='%s' flavor='%s' nics='%s'", image_id, flavor_id, str(net_list))
        try:
            self._get_my_tenant()
#            net_list = []
#            for k,v in net_dict.items():
#                print k,v
#                net_list.append('{"name":"' + k + '", "uuid":"' + v + '"}')
#            net_list_string = ', '.join(net_list) 
            # Build the "networks" section of the create-server payload.
            # Interfaces without a net_id are skipped.
            virtio_net_list=[]
            for net in net_list:
                if not net.get("net_id"):
                    continue
                net_dict = {'uuid': net["net_id"]}
                if net.get("type"):
                    # translate the connector's interface types to the VIM's vocabulary
                    if net["type"] == "SR-IOV":
                        net_dict["type"] = "VF"
                    elif net["type"] == "PCI-PASSTHROUGH":
                        net_dict["type"] = "PF"
                    else:
                        net_dict["type"] = net["type"]
                if net.get("name"):
                    net_dict["name"] = net["name"]
                if net.get("vpci"):
                    net_dict["vpci"] = net["vpci"]
                if net.get("model"):
                    # both "VIRTIO" and "paravirt" map to the VIM's "virtio" model
                    if net["model"] == "VIRTIO" or net["model"] == "paravirt":
                        net_dict["model"] = "virtio"
                    else:
                        net_dict["model"] = net["model"]
                if net.get("mac_address"):
                    net_dict["mac_address"] = net["mac_address"]
                if net.get("ip_address"):
                    net_dict["ip_address"] = net["ip_address"]
                virtio_net_list.append(net_dict)
            payload_dict={  "name":        name[:64],
                            "description": description,
                            "imageRef":    image_id,
                            "flavorRef":   flavor_id,
                            "networks": virtio_net_list
                        }
            # start is only forwarded when explicitly set (True/False)
            if start != None:
                payload_dict["start"] = start
            payload_req = json.dumps({"server": payload_dict})
            url = self.url+'/'+self.tenant+'/servers'
            self.logger.info("Adding a new vm POST %s DATA %s", url, payload_req)
            vim_response = requests.post(url, headers = self.headers_req, data=payload_req)
            self._check_http_request_response(vim_response)
            self.logger.debug(vim_response.text)
            #print json.dumps(vim_response.json(), indent=4)
            response = vim_response.json()
            js_v(response, new_vminstance_response_schema)
            #r = self._remove_extra_items(response, new_vminstance_response_schema)
            #if r is not None: 
            #    self.logger.warn("Warning: remove extra items %s", str(r))
            vminstance_id = response['server']['id']

            # fill net["vim_id"] in the caller's net_list (mutated in place) with
            # the interface ids reported back by the VIM
            #connect data plane interfaces to network
            for net in net_list:
                if net["type"]=="virtual":
                    if not net.get("net_id"):
                        continue
                    for iface in response['server']['networks']:
                        if "name" in net:
                            if net["name"]==iface["name"]:
                                net["vim_id"] = iface['iface_id']
                                break
                        elif "net_id" in net:
                            if net["net_id"]==iface["net_id"]:
                                net["vim_id"] = iface['iface_id']
                                break
                else: #dataplane
                    # dataplane interfaces are matched by name inside the numa description
                    for numa in response['server'].get('extended',{}).get('numas',() ):
                        for iface in numa.get('interfaces',() ):
                            if net['name'] == iface['name']:
                                net['vim_id'] = iface['iface_id']
                                #Code bellow is not needed, current openvim connect dataplane interfaces 
                                #if net.get("net_id"):
                                ##connect dataplane interface
                                #    result, port_id = self.connect_port_network(iface['iface_id'], net["net_id"])
                                #    if result < 0:
                                #        error_text = "Error attaching port %s to network %s: %s." % (iface['iface_id'], net["net_id"], port_id)
                                #        print "new_vminstance: " + error_text
                                #        self.delete_vminstance(vminstance_id)
                                #        return result, error_text
                                break
        
            return vminstance_id, None
        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
            self._format_request_exception(e)
+        
+    def get_vminstance(self, vm_id):
+        '''Returns the VM instance information from VIM'''
+        try:
+            self._get_my_tenant()
+            url = self.url+'/'+self.tenant+'/servers/'+vm_id
+            self.logger.info("Getting vm GET %s", url)
+            vim_response = requests.get(url, headers = self.headers_req)
+            vim_response = requests.get(url, headers = self.headers_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            js_v(response, new_vminstance_response_schema)
+            #r = self._remove_extra_items(response, new_vminstance_response_schema)
+            #if r is not None: 
+            #    self.logger.warn("Warning: remove extra items %s", str(r))
+            return response['server']
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+        
+    def delete_vminstance(self, vm_id, created_items=None):
+        '''Removes a VM instance from VIM, returns the deleted vm_id'''
+        try:
+            self._get_my_tenant()
+            url = self.url+'/'+self.tenant+'/servers/'+vm_id
+            self.logger.info("Deleting VIM vm DELETE %s", url)
+            vim_response = requests.delete(url, headers=self.headers_req)
+            self._check_http_request_response(vim_response)
+            #self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            return vm_id
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+
    def refresh_vms_status(self, vm_list):
        """Refreshes the status of the virtual machines.

        Returns a dict indexed by vm_id; each value may contain:
            status: MANO-format status (or DELETED / VIM_ERROR / OTHER / ACTIVE:NoMgmtIP)
            error_msg: error details, when any
            vim_info: yaml dump of the raw VIM server description
            interfaces: list of interface dicts built from the VIM ports
        """
        try:
            self._get_my_tenant()
        except requests.exceptions.RequestException as e:
            self._format_request_exception(e)
        vm_dict={}
        for vm_id in vm_list:
            vm={}
            #print "VIMConnector refresh_tenant_vms and nets: Getting tenant VM instance information from VIM"
            try:
                url = self.url + '/' + self.tenant + '/servers/' + vm_id
                self.logger.info("Getting vm GET %s", url)
                vim_response = requests.get(url, headers = self.headers_req)
                self._check_http_request_response(vim_response)
                response = vim_response.json()
                js_v(response, new_vminstance_response_schema)
                # map the VIM status to the MANO vocabulary; unknown values are
                # reported as OTHER with the raw status in error_msg
                if response['server']['status'] in vmStatus2manoFormat:
                    vm['status'] = vmStatus2manoFormat[ response['server']['status']  ]
                else:
                    vm['status'] = "OTHER"
                    vm['error_msg'] = "VIM status reported " + response['server']['status']
                if response['server'].get('last_error'):
                    vm['error_msg'] = response['server']['last_error']
                vm["vim_info"] = yaml.safe_dump(response['server'])
                #get interfaces info
                try:
                    management_ip = False
                    url2 = self.url + '/ports?device_id=' + quote(vm_id)
                    self.logger.info("Getting PORTS GET %s", url2)
                    vim_response2 = requests.get(url2, headers = self.headers_req)
                    self._check_http_request_response(vim_response2)
                    client_data = vim_response2.json()
                    if isinstance(client_data.get("ports"), list):
                        vm["interfaces"]=[]
                    for port in client_data.get("ports"):
                        interface={}
                        interface['vim_info'] = yaml.safe_dump(port)
                        interface["mac_address"] = port.get("mac_address")
                        interface["vim_net_id"] = port.get("network_id")
                        interface["vim_interface_id"] = port["id"]
                        interface["ip_address"] = port.get("ip_address")
                        if interface["ip_address"]:
                            management_ip = True
                        # NOTE(review): an ip_address of "0.0.0.0" still sets
                        # management_ip above before being nulled here -- confirm
                        # whether 0.0.0.0 should count as a management IP
                        if interface["ip_address"] == "0.0.0.0":
                            interface["ip_address"] = None
                        vm["interfaces"].append(interface)
                        
                except Exception as e:
                    # best-effort: a port-query failure is logged but does not
                    # invalidate the VM status already gathered
                    self.logger.error("refresh_vms_and_nets. Port get %s: %s", type(e).__name__, str(e))

                if vm['status'] == "ACTIVE" and not management_ip:
                    vm['status'] = "ACTIVE:NoMgmtIP"
                    
            except vimconn.vimconnNotFoundException as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm['status'] = "DELETED"
                vm['error_msg'] = str(e)
            except (requests.exceptions.RequestException, js_e.ValidationError, vimconn.vimconnException) as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm['status'] = "VIM_ERROR"
                vm['error_msg'] = str(e)
            vm_dict[vm_id] = vm
        return vm_dict
+
+    def refresh_nets_status(self, net_list):
+        '''Get the status of the networks
+           Params: the list of network identifiers
+           Returns a dictionary with:
+                net_id:         #VIM id of this network
+                    status:     #Mandatory. Text with one of:
+                                #  DELETED (not found at vim)
+                                #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...) 
+                                #  OTHER (Vim reported other status not understood)
+                                #  ERROR (VIM indicates an ERROR status)
+                                #  ACTIVE, INACTIVE, DOWN (admin down), 
+                                #  BUILD (on building process)
+                                #
+                    error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR 
+                    vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
+
+        '''
+        try:
+            self._get_my_tenant()
+        except requests.exceptions.RequestException as e:
+            self._format_request_exception(e)
+        
+        net_dict={}
+        for net_id in net_list:
+            net = {}
+            #print "VIMConnector refresh_tenant_vms_and_nets: Getting tenant network from VIM (tenant: " + str(self.tenant) + "): "
+            try:
+                net_vim = self.get_network(net_id)
+                if net_vim['status'] in netStatus2manoFormat:
+                    net["status"] = netStatus2manoFormat[ net_vim['status'] ]
+                else:
+                    net["status"] = "OTHER"
+                    net["error_msg"] = "VIM status reported " + net_vim['status']
+                    
+                if net["status"] == "ACTIVE" and not net_vim['admin_state_up']:
+                    net["status"] = "DOWN"
+                if net_vim.get('last_error'):
+                    net['error_msg'] = net_vim['last_error']
+                net["vim_info"] = yaml.safe_dump(net_vim)
+            except vimconn.vimconnNotFoundException as e:
+                self.logger.error("Exception getting net status: %s", str(e))
+                net['status'] = "DELETED"
+                net['error_msg'] = str(e)
+            except (requests.exceptions.RequestException, js_e.ValidationError, vimconn.vimconnException) as e:
+                self.logger.error("Exception getting net status: %s", str(e))
+                net['status'] = "VIM_ERROR"
+                net['error_msg'] = str(e)
+            net_dict[net_id] = net
+        return net_dict
+    
+    def action_vminstance(self, vm_id, action_dict, created_items={}):
+        '''Send and action over a VM instance from VIM'''
+        '''Returns the status'''
+        try:
+            self._get_my_tenant()
+            if "console" in action_dict:
+                raise vimconn.vimconnException("getting console is not available at openvim", http_code=vimconn.HTTP_Service_Unavailable)
+            url = self.url+'/'+self.tenant+'/servers/'+vm_id+"/action"
+            self.logger.info("Action over VM instance POST %s", url)
+            vim_response = requests.post(url, headers = self.headers_req, data=json.dumps(action_dict) )
+            self._check_http_request_response(vim_response)
+            return None
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+
+#NOT USED METHODS in current version        
+  
+    def host_vim2gui(self, host, server_dict):
+        '''Transform host dictionary from VIM format to GUI format,
+        and append to the server_dict
+        '''
+        if type(server_dict) is not dict: 
+            print( 'vimconnector.host_vim2gui() ERROR, param server_dict must be a dictionary')
+            return
+        RAD={}
+        occupation={}
+        for numa in host['host']['numas']:
+            RAD_item={}
+            occupation_item={}
+            #memory
+            RAD_item['memory']={'size': str(numa['memory'])+'GB', 'eligible': str(numa['hugepages'])+'GB'}
+            occupation_item['memory']= str(numa['hugepages_consumed'])+'GB'
+            #cpus
+            RAD_item['cpus']={}
+            RAD_item['cpus']['cores'] = []
+            RAD_item['cpus']['eligible_cores'] = []
+            occupation_item['cores']=[]
+            for _ in range(0, len(numa['cores']) // 2):
+                RAD_item['cpus']['cores'].append( [] )
+            for core in numa['cores']:
+                RAD_item['cpus']['cores'][core['core_id']].append(core['thread_id'])
+                if not 'status' in core: RAD_item['cpus']['eligible_cores'].append(core['thread_id'])
+                if 'instance_id' in core: occupation_item['cores'].append(core['thread_id'])
+            #ports
+            RAD_item['ports']={}
+            occupation_item['ports']={}
+            for iface in numa['interfaces']:
+                RAD_item['ports'][ iface['pci'] ] = 'speed:'+str(iface['Mbps'])+'M'
+                occupation_item['ports'][ iface['pci'] ] = { 'occupied': str(100*iface['Mbps_consumed'] // iface['Mbps']) + "%" }
+                
+            RAD[ numa['numa_socket'] ] = RAD_item
+            occupation[ numa['numa_socket'] ] = occupation_item
+        server_dict[ host['host']['name'] ] = {'RAD':RAD, 'occupation':occupation}
+
+    def get_hosts_info(self):
+        '''Get the information of deployed hosts
+        Returns the hosts content'''
+    #obtain hosts list
+        url=self.url+'/hosts'
+        try:
+            vim_response = requests.get(url)
+        except requests.exceptions.RequestException as e:
+            print( "get_hosts_info Exception: ", e.args)
+            return -vimconn.HTTP_Not_Found, str(e.args[0])
+        print("vim get", url, "response:",  vim_response.status_code, vim_response.json())
+        #print vim_response.status_code
+        #print json.dumps(vim_response.json(), indent=4)
+        if vim_response.status_code != 200:
+            # TODO: get error
+            print('vimconnector.get_hosts_info error getting host list {} {}'.format(vim_response.status_code, vim_response.json()))
+            return -vim_response.status_code, "Error getting host list"
+        
+        res,hosts = self._format_in(vim_response, get_hosts_response_schema)
+            
+        if res==False:
+            print("vimconnector.get_hosts_info error parsing GET HOSTS vim response", hosts)
+            return vimconn.HTTP_Internal_Server_Error, hosts
+    #obtain hosts details
+        hosts_dict={}
+        for host in hosts['hosts']:
+            url=self.url+'/hosts/'+host['id']
+            try:
+                vim_response = requests.get(url)
+            except requests.exceptions.RequestException as e:
+                print( "get_hosts_info Exception: ", e.args)
+                return -vimconn.HTTP_Not_Found, str(e.args[0])
+            print("vim get", url, "response:",  vim_response.status_code, vim_response.json())
+            if vim_response.status_code != 200:
+                print('vimconnector.get_hosts_info error getting detailed host {} {}'.format(vim_response.status_code, vim_response.json()))
+                continue
+            res,host_detail = self._format_in(vim_response, get_host_detail_response_schema)
+            if res==False:
+                print ("vimconnector.get_hosts_info error parsing GET HOSTS/{} vim response {}".format(host['id']), host_detail)
+                continue
+            #print 'host id '+host['id'], json.dumps(host_detail, indent=4)
+            self.host_vim2gui(host_detail, hosts_dict)
+        return 200, hosts_dict
+
+    def get_hosts(self, vim_tenant):
+        '''Get the hosts and deployed instances
+        Returns the hosts content'''
+    #obtain hosts list
+        url=self.url+'/hosts'
+        try:
+            vim_response = requests.get(url)
+        except requests.exceptions.RequestException as e:
+            print("get_hosts Exception: ", e.args)
+            return -vimconn.HTTP_Not_Found, str(e.args[0])
+        print("vim get", url, "response:",  vim_response.status_code, vim_response.json())
+        #print vim_response.status_code
+        #print json.dumps(vim_response.json(), indent=4)
+        if vim_response.status_code != 200:
+            #TODO: get error
+            print('vimconnector.get_hosts error getting host list {} {}'.format(vim_response.status_code, vim_response.json()))
+            return -vim_response.status_code, "Error getting host list"
+        
+        res,hosts = self._format_in(vim_response, get_hosts_response_schema)
+            
+        if res==False:
+            print("vimconnector.get_host error parsing GET HOSTS vim response", hosts)
+            return vimconn.HTTP_Internal_Server_Error, hosts
+    #obtain instances from hosts
+        for host in hosts['hosts']:
+            url=self.url+'/' + vim_tenant + '/servers?hostId='+host['id']
+            try:
+                vim_response = requests.get(url)
+            except requests.exceptions.RequestException as e:
+                print("get_hosts Exception: ", e.args)
+                return -vimconn.HTTP_Not_Found, str(e.args[0])
+            print("vim get", url, "response:",  vim_response.status_code, vim_response.json())
+            if vim_response.status_code != 200:
+                print('vimconnector.get_hosts error getting instances at host {} {}'.format(vim_response.status_code, vim_response.json()))
+                continue
+            res,servers = self._format_in(vim_response, get_server_response_schema)
+            if res==False:
+                print("vimconnector.get_host error parsing GET SERVERS/{} vim response {}".format(host['id']), servers)
+                continue
+            #print 'host id '+host['id'], json.dumps(host_detail, indent=4)
+            host['instances'] = servers['servers']
+        return 200, hosts['hosts']
+
+    def get_processor_rankings(self):
+        '''Get the processor rankings in the VIM database'''
+        url=self.url+'/processor_ranking'
+        try:
+            vim_response = requests.get(url)
+        except requests.exceptions.RequestException as e:
+            print("get_processor_rankings Exception: ", e.args)
+            return -vimconn.HTTP_Not_Found, str(e.args[0])
+        print("vim get", url, "response:", vim_response.status_code, vim_response.json())
+        #print vim_response.status_code
+        #print json.dumps(vim_response.json(), indent=4)
+        if vim_response.status_code != 200:
+            #TODO: get error
+            print('vimconnector.get_processor_rankings error getting processor rankings {} {}'.format(vim_response.status_code, vim_response.json()))
+            return -vim_response.status_code, "Error getting processor rankings"
+        
+        res,rankings = self._format_in(vim_response, get_processor_rankings_response_schema)
+        return res, rankings['rankings']
+    
+    def new_host(self, host_data):
+        '''Adds a new host to VIM'''
+        '''Returns status code of the VIM response'''
+        payload_req = host_data
+        try:
+            url = self.url_admin+'/hosts'
+            self.logger.info("Adding a new host POST %s", url)
+            vim_response = requests.post(url, headers = self.headers_req, data=payload_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            js_v(response, new_host_response_schema)
+            r = self._remove_extra_items(response, new_host_response_schema)
+            if r is not None: 
+                self.logger.warn("Warning: remove extra items %s", str(r))
+            host_id = response['host']['id']
+            return host_id
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+    
+    def new_external_port(self, port_data):
+        '''Adds a external port to VIM'''
+        '''Returns the port identifier'''
+        #TODO change to logging exception code policies
+        print( "VIMConnector: Adding a new external port")
+        payload_req = port_data
+        try:
+            vim_response = requests.post(self.url_admin+'/ports', headers = self.headers_req, data=payload_req)
+        except requests.exceptions.RequestException as e:
+            self.logger.error("new_external_port Exception: ", str(e))
+            return -vimconn.HTTP_Not_Found, str(e.args[0])
+        print( vim_response)
+        #print vim_response.status_code
+        if vim_response.status_code == 200:
+        #print vim_response.json()
+        #print json.dumps(vim_response.json(), indent=4)
+            res, http_content = self._format_in(vim_response, new_port_response_schema)
+        #print http_content
+            if res:
+                r = self._remove_extra_items(http_content, new_port_response_schema)
+                if r is not None: print("Warning: remove extra items ", r)
+                #print http_content
+                port_id = http_content['port']['id']
+                print("Port id: ",port_id)
+                return vim_response.status_code,port_id
+            else: return -vimconn.HTTP_Bad_Request,http_content
+        else:
+            #print vim_response.text
+            jsonerror = self._format_jsonerror(vim_response)
+            text = 'Error in VIM "{}": not possible to add new external port. HTTP Response: {}. Error: {}'.format(
+                self.url_admin, vim_response.status_code, jsonerror)
+            #print text
+            return -vim_response.status_code,text
+        
+    def new_external_network(self,net_name,net_type):
+        '''Adds a external network to VIM (shared)'''
+        '''Returns the network identifier'''
+        #TODO change to logging exception code policies
+        print("VIMConnector: Adding external shared network to VIM (type " + net_type + "): "+ net_name)
+        
+        payload_req = '{"network":{"name": "' + net_name + '","shared":true,"type": "' + net_type + '"}}'
+        try:
+            vim_response = requests.post(self.url+'/networks', headers = self.headers_req, data=payload_req)
+        except requests.exceptions.RequestException as e:
+            self.logger.error( "new_external_network Exception: ", e.args)
+            return -vimconn.HTTP_Not_Found, str(e.args[0])
+        print(vim_response)
+        #print vim_response.status_code
+        if vim_response.status_code == 200:
+            #print vim_response.json()
+            #print json.dumps(vim_response.json(), indent=4)
+            res,http_content = self._format_in(vim_response, new_network_response_schema)
+            #print http_content
+            if res:
+                r = self._remove_extra_items(http_content, new_network_response_schema)
+                if r is not None: print("Warning: remove extra items ", r)
+                #print http_content
+                network_id = http_content['network']['id']
+                print( "Network id: ",network_id)
+                return vim_response.status_code,network_id
+            else: return -vimconn.HTTP_Bad_Request,http_content
+        else:
+            #print vim_response.text
+            jsonerror = self._format_jsonerror(vim_response)
+            text = 'Error in VIM "{}": not possible to add new external network. HTTP Response: {}. Error: {}'.format(
+                self.url, vim_response.status_code, jsonerror)
+            #print text
+            return -vim_response.status_code,text
+        
+    def connect_port_network(self, port_id, network_id, admin=False):
+        '''Connects a external port to a network'''
+        '''Returns status code of the VIM response'''
+        #TODO change to logging exception code policies
+        print("VIMConnector: Connecting external port to network")
+        
+        payload_req = '{"port":{"network_id":"' + network_id + '"}}'
+        if admin:
+            if self.url_admin==None:
+                return -vimconn.HTTP_Unauthorized, "datacenter cannot contain  admin URL"
+            url= self.url_admin
+        else:
+            url= self.url
+        try:
+            vim_response = requests.put(url +'/ports/'+port_id, headers = self.headers_req, data=payload_req)
+        except requests.exceptions.RequestException as e:
+            print("connect_port_network Exception: ", e.args)
+            return -vimconn.HTTP_Not_Found, str(e.args[0])
+        print(vim_response)
+        #print vim_response.status_code
+        if vim_response.status_code == 200:
+            #print vim_response.json()
+            #print json.dumps(vim_response.json(), indent=4)
+            res,http_content = self._format_in(vim_response, new_port_response_schema)
+            #print http_content
+            if res:
+                r = self._remove_extra_items(http_content, new_port_response_schema)
+                if r is not None: print("Warning: remove extra items ", r)
+                #print http_content
+                port_id = http_content['port']['id']
+                print("Port id: ",port_id)
+                return vim_response.status_code,port_id
+            else: return -vimconn.HTTP_Bad_Request,http_content
+        else:
+            print(vim_response.text)
+            jsonerror = self._format_jsonerror(vim_response)
+            text = 'Error in VIM "{}": not possible to connect external port to network. HTTP Response: {}.' \
+                   ' Error: {}'.format(self.url_admin, vim_response.status_code, jsonerror)
+            print(text)
+            return -vim_response.status_code,text
+        
+
diff --git a/RO-VIM-openvim/requirements.txt b/RO-VIM-openvim/requirements.txt
new file mode 100644 (file)
index 0000000..f9797f8
--- /dev/null
@@ -0,0 +1,20 @@
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+PyYAML
+requests
+netaddr
+git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro
+
diff --git a/RO-VIM-openvim/setup.py b/RO-VIM-openvim/setup.py
new file mode 100644 (file)
index 0000000..19ac0ba
--- /dev/null
@@ -0,0 +1,55 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
from setuptools import setup

_name = "osm_rovim_openvim"

# fix: RST title underlines must be at least as long as the title text,
# otherwise the long_description fails to render; also "pluging" -> "plugin"
README = """
=================
osm-rovim_openvim
=================

osm-ro plugin for openvim VIM
"""

setup(
    name=_name,
    description='OSM ro vim plugin for openvim',
    long_description=README,
    version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'),
    # version=VERSION,
    # python_requires='>3.5.0',
    author='ETSI OSM',
    author_email='alfonso.tiernosepulveda@telefonica.com',
    maintainer='Alfonso Tierno',
    maintainer_email='alfonso.tiernosepulveda@telefonica.com',
    url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary',
    license='Apache 2.0',

    packages=[_name],
    include_package_data=True,
    dependency_links=["git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro"],
    install_requires=[
        "requests", "netaddr", "PyYAML",
    ],
    setup_requires=['setuptools-version-command'],
    entry_points={
        'osm_rovim.plugins': ['rovim_openvim = osm_rovim_openvim.vimconn_openvim'],
    },
)
diff --git a/RO-VIM-openvim/stdeb.cfg b/RO-VIM-openvim/stdeb.cfg
new file mode 100644 (file)
index 0000000..50f9821
--- /dev/null
@@ -0,0 +1,18 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+[DEFAULT]
+X-Python3-Version : >= 3.5
+Depends3: python3-requests, python3-netaddr, python3-yaml, python3-osm-ro
diff --git a/RO-VIM-openvim/tox.ini b/RO-VIM-openvim/tox.ini
new file mode 100644 (file)
index 0000000..f17bf9a
--- /dev/null
@@ -0,0 +1,41 @@
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+[tox]
+envlist = py3
+toxworkdir={homedir}/.tox
+
+[testenv]
+basepython = python3
+install_command = python3 -m pip install -r requirements.txt -U {opts} {packages}
+# deps = -r{toxinidir}/test-requirements.txt
+commands=python3 -m unittest discover -v
+
+[testenv:flake8]
+basepython = python3
+deps = flake8
+commands = flake8 osm_rovim_openvim --max-line-length 120 \
+    --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504
+
+[testenv:unittest]
+basepython = python3
+commands = python3 -m unittest osm_rovim_openvim.tests
+
+[testenv:build]
+basepython = python3
+deps = stdeb
+       setuptools-version-command
+commands = python3 setup.py --command-packages=stdeb.command bdist_deb
+
diff --git a/RO-VIM-vmware/Makefile b/RO-VIM-vmware/Makefile
new file mode 100644 (file)
index 0000000..283afdf
--- /dev/null
@@ -0,0 +1,26 @@
+##
+# Copyright VMware Inc.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+all: clean package
+
+clean:
+       rm -rf dist deb_dist osm_rovim_vmware-*.tar.gz osm_rovim_vmware.egg-info .eggs
+
+package:
+       python3 setup.py --command-packages=stdeb.command sdist_dsc
+       cp debian/python3-osm-rovim-vmware.postinst deb_dist/osm-rovim-vmware*/debian/
+       cd deb_dist/osm-rovim-vmware*/ && dpkg-buildpackage -rfakeroot -uc -us
+
diff --git a/RO-VIM-vmware/debian/python3-osm-rovim-vmware.postinst b/RO-VIM-vmware/debian/python3-osm-rovim-vmware.postinst
new file mode 100755 (executable)
index 0000000..e7ce877
--- /dev/null
@@ -0,0 +1,29 @@
#!/bin/bash

##
# Copyright VMware Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# For those usages not covered by the Apache License, Version 2.0 please
# contact with: OSM_TECH@list.etsi.org
##

# Debian post-install hook: installs via pip the vmware-connector
# dependencies that are not packaged for Debian.
echo "POST INSTALL OSM-ROVIM-VMWARE"

# Pip packages required for vmware connector
python3 -m pip install --upgrade pip
# NOTE(review): pyvcloud is pinned to 19.1.1 — presumably later releases
# change the API; confirm before bumping the version
python3 -m pip install --upgrade pyvcloud==19.1.1
python3 -m pip install --upgrade progressbar
python3 -m pip install --upgrade pyvmomi
# python3 -m pip install --upgrade prettytable
# python3 -m pip install --upgrade pyang pyangbind
diff --git a/RO-VIM-vmware/osm_rovim_vmware/tests/test_vimconn_vmware.py b/RO-VIM-vmware/osm_rovim_vmware/tests/test_vimconn_vmware.py
new file mode 100755 (executable)
index 0000000..f2ab68c
--- /dev/null
@@ -0,0 +1,980 @@
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2016-2017 VMware Inc.
+# This file is part of ETSI OSM
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact:  osslegalrouting@vmware.com
+##
+
+
+from vimconn_vmware import vimconnector
+from osm_ro.vimconn import vimconnUnexpectedResponse,vimconnNotFoundException,vimconnException
+from pyvcloud.vcd.client import Client
+from lxml import etree as lxmlElementTree
+from pyvcloud.vcd.org import Org
+from pyvcloud.vcd.vdc import VDC
+from pyvcloud.vcd.vapp import VApp
+import os
+import unittest
+import mock
+import test_vimconn_vmware_xml_response as xml_resp
+from os import path
+
+__author__ = "Prakash Kasar"
+
+class TestVimconn_VMware(unittest.TestCase):
+    def setUp(self):
+        config = { "admin_password": "admin",
+                  "admin_username":"user",
+                  "nsx_user": "nsx",
+                  "nsx_password": "nsx",
+                  "nsx_manager":"https://test-nsx" }
+
+        self.client = Client('test', verify_ssl_certs=False)
+
+        # get vcd org object
+        org_resp = xml_resp.org_xml_response
+        get_org = lxmlElementTree.fromstring(org_resp)
+        self.org = Org(self.client, resource=get_org)
+
+        self.vim = vimconnector(uuid='12354',
+                                 name='test',
+                         tenant_id='abc1234',
+                          tenant_name='test',
+                          url='https://test',
+                               config=config)
+
+
+    @mock.patch.object(vimconnector,'get_vdc_details')
+    @mock.patch.object(vimconnector,'connect')
+    @mock.patch.object(vimconnector,'perform_request')
+    def test_get_network_not_found(self, perform_request, connect, get_vdc_details):
+        """
+        Testcase to get network with invalid network id
+        """
+        # created vdc object
+        vdc_xml_resp = xml_resp.vdc_xml_response
+        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
+
+        # assumed return value from VIM connector
+        get_vdc_details.return_value = self.org, vdc
+        self.vim.client = self.vim.connect()
+        perform_request.return_value.status_code = 200
+        perform_request.return_value.content = xml_resp.vdc_xml_response
+
+        # call to VIM connector method with invalid id
+        self.assertRaises(vimconnNotFoundException,self.vim.get_network,'mgmt-net')
+
+    @mock.patch.object(vimconnector,'perform_request')
+    @mock.patch.object(vimconnector,'get_vdc_details')
+    @mock.patch.object(vimconnector,'connect')
+    def test_get_network(self, connect, get_vdc_details, perform_request):
+        """
+        Testcase to get network with valid network id
+        """
+        net_id = '5c04dc6d-6096-47c6-b72b-68f19013d491'
+        # created vdc object
+        vdc_xml_resp = xml_resp.vdc_xml_response
+        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
+
+        # assumed return value from VIM connector
+        get_vdc_details.return_value = self.org, vdc
+        self.vim.client = self.vim.connect()
+        perform_request.side_effect = [mock.Mock(status_code = 200,
+                                       content = xml_resp.vdc_xml_response),
+                                       mock.Mock(status_code = 200,
+                                       content = xml_resp.network_xml_response)]
+        # call to VIM connector method with network_id
+        result = self.vim.get_network(net_id)
+
+        # assert verified expected and return result from VIM connector
+        self.assertEqual(net_id, result['id'])
+
+    @mock.patch.object(vimconnector,'perform_request')
+    @mock.patch.object(vimconnector,'get_vdc_details')
+    @mock.patch.object(vimconnector,'connect')
+    def test_get_network_list_not_found(self, connect, get_vdc_details, perform_request):
+        """
+        Testcase to get list of available networks by invalid network id
+        """
+        vdc_xml_resp = xml_resp.vdc_xml_response
+        network_xml_resp = xml_resp.network_xml_response
+        # created vdc object
+        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
+
+        # assumed return value from VIM connector
+        get_vdc_details.return_value = self.org, vdc
+        self.vim.client = self.vim.connect()
+        perform_request.return_value.status_code = 200
+        perform_request.return_value.content = network_xml_resp
+
+        # call to VIM connector method with network_id
+        result = self.vim.get_network_list({'id':'45hdfg-345nb-345'})
+
+        # assert verified expected and return result from VIM connector
+        self.assertEqual(list(), result)
+
+    @mock.patch.object(vimconnector,'perform_request')
+    @mock.patch.object(vimconnector,'get_vdc_details')
+    @mock.patch.object(vimconnector,'connect')
+    def test_get_network_list(self, connect, get_vdc_details, perform_request):
+        """
+        Testcase to get list of available networks by valid network id
+        """
+        #import pdb;pdb.set_trace() ## Not working
+        vdc_xml_resp = xml_resp.vdc_xml_response
+        net_id = '5c04dc6d-6096-47c6-b72b-68f19013d491'
+        # created vdc object
+        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
+        # created network object
+        network_xml_resp = xml_resp.network_xml_response
+        # assumed return value from VIM connector
+        get_vdc_details.return_value = self.org, vdc
+        self.vim.client = self.vim.connect()
+        perform_request.side_effect = [mock.Mock(status_code = 200,
+                                       content = xml_resp.vdc_xml_response),
+                                       mock.Mock(status_code = 200,
+                                       content = network_xml_resp)]
+        perform_request.reset_mock()
+        perform_request()
+
+        # call to VIM connector method with network_id
+        result = self.vim.get_network_list({'id': net_id})
+
+        # assert verified expected and return result from VIM connector
+        for item in result:
+            self.assertEqual(item.get('id'), net_id)
+            self.assertEqual(item.get('status'), 'ACTIVE')
+            self.assertEqual(item.get('shared'), False)
+
+    @mock.patch.object(vimconnector,'create_network_rest')
+    def test_new_network(self, create_network_rest):
+        """
+        Testcase to create new network by passing network name and type
+        """
+        # create network response
+        create_net_xml_resp = xml_resp.create_network_xml_response
+        net_name = 'Test_network'
+        net_type = 'bridge'
+        # assumed return value from VIM connector
+        create_network_rest.return_value = create_net_xml_resp
+        # call to VIM connector method with network name and type
+        result = self.vim.new_network(net_name, net_type)
+
+        # assert verified expected and return result from VIM connector
+        self.assertEqual(result, 'df1956fa-da04-419e-a6a2-427b6f83788f')
+
+    @mock.patch.object(vimconnector, 'create_network_rest')
+    def test_new_network_not_created(self, create_network_rest):
+        """
+        Testcase to create new network by assigning empty xml data
+        """
+        # assumed return value from VIM connector
+        create_network_rest.return_value = """<?xml version="1.0" encoding="UTF-8"?>
+                                              <OrgVdcNetwork></OrgVdcNetwork>"""
+
+        # assert verified expected and return result from VIM connector
+        self.assertRaises(vimconnUnexpectedResponse,self.vim.new_network,
+                                                              'test_net',
+                                                                'bridge')
+
+    @mock.patch.object(vimconnector, 'connect')
+    @mock.patch.object(vimconnector, 'get_network_action')
+    @mock.patch.object(vimconnector, 'delete_network_action')
+    def test_delete_network(self, delete_network_action, get_network_action, connect):
+        """
+        Testcase to delete network by network id
+        """
+        net_uuid = '0a55e5d1-43a2-4688-bc92-cb304046bf87'
+        # delete network response
+        delete_net_xml_resp = xml_resp.delete_network_xml_response
+
+        # assumed return value from VIM connector
+        self.vim.client = self.vim.connect()
+        get_network_action.return_value = delete_net_xml_resp
+        delete_network_action.return_value = True
+        # call to VIM connector method with network_id
+        result = self.vim.delete_network(net_uuid)
+
+        # assert verified expected and return result from VIM connector
+        self.assertEqual(result, net_uuid)
+
+    @mock.patch.object(vimconnector, 'get_vcd_network')
+    def test_delete_network_not_found(self, get_vcd_network):
+        """
+        Testcase to delete network by invalid network id
+        """
+        # assumed return value from VIM connector
+        get_vcd_network.return_value = False
+        # assert verified expected and return result from VIM connector
+        self.assertRaises(vimconnNotFoundException,self.vim.delete_network,
+                                    '2a23e5d1-42a2-0648-bc92-cb508046bf87')
+
+    def test_get_flavor(self):
+        """
+        Testcase to get flavor data
+        """
+        flavor_data = {'a646eb8a-95bd-4e81-8321-5413ee72b62e': {'disk': 10,
+                                                                'vcpus': 1,
+                                                               'ram': 1024}}
+        vimconnector.flavorlist = flavor_data
+        result = self.vim.get_flavor('a646eb8a-95bd-4e81-8321-5413ee72b62e')
+
+        # assert verified expected and return result from VIM connector
+        self.assertEqual(result, flavor_data['a646eb8a-95bd-4e81-8321-5413ee72b62e'])
+
+    def test_get_flavor_not_found(self):
+        """
+        Testcase to get flavor data with invalid id
+        """
+        vimconnector.flavorlist = {}
+        # assert verified expected and return result from VIM connector
+        self.assertRaises(vimconnNotFoundException,self.vim.get_flavor,
+                                'a646eb8a-95bd-4e81-8321-5413ee72b62e')
+
+    def test_new_flavor(self):
+        """
+        Testcase to create new flavor data
+        """
+        flavor_data = {'disk': 10, 'vcpus': 1, 'ram': 1024}
+        result = self.vim.new_flavor(flavor_data)
+        # assert verified expected and return result from VIM connector
+        self.assertIsNotNone(result)
+
+    def test_delete_flavor(self):
+        """
+        Testcase to delete flavor data
+        """
+        flavor_data = {'2cb3dffb-5c51-4355-8406-28553ead28ac': {'disk': 10,
+                                                                'vcpus': 1,
+                                                               'ram': 1024}}
+        vimconnector.flavorlist = flavor_data
+        # return value from VIM connector
+        result = self.vim.delete_flavor('2cb3dffb-5c51-4355-8406-28553ead28ac')
+
+        # assert verified expected and return result from VIM connector
+        self.assertEqual(result, '2cb3dffb-5c51-4355-8406-28553ead28ac')
+
+    @mock.patch.object(vimconnector,'connect_as_admin')
+    @mock.patch.object(vimconnector,'perform_request')
+    def test_delete_image_not_found(self, perform_request, connect_as_admin):
+        """
+        Testcase to delete image by invalid image id
+        """
+        # creating conn object
+        self.vim.client = self.vim.connect_as_admin()
+
+        # assumed return value from VIM connector
+        perform_request.side_effect = [mock.Mock(status_code = 200,
+                                       content = xml_resp.delete_catalog_xml_response),
+                                       mock.Mock(status_code = 201,
+                                       content = xml_resp.delete_catalog_item_xml_response)
+                                       ]
+
+        # assert verified expected and return result from VIM connector
+        self.assertRaises(vimconnNotFoundException, self.vim.delete_image, 'invali3453')
+
+    @mock.patch.object(vimconnector,'get_vdc_details')
+    @mock.patch.object(vimconnector,'connect')
+    @mock.patch.object(Org,'list_catalogs')
+    def test_get_image_list(self, list_catalogs, connect, get_vdc_details):
+        """
+        Testcase to get image list by valid image id
+        """
+        # created vdc object
+        vdc_xml_resp = xml_resp.vdc_xml_response
+        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
+        self.vim.client = self.vim.connect()
+
+        # assumed return value from VIM connector
+        get_vdc_details.return_value = self.org, vdc
+        list_catalogs.return_value = [{'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-10-15T02:03:59.403-07:00', 'id': '34925a30-0f4a-4018-9759-0d6799063b51', 'name': 'Ubuntu_1nic'}, {'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'orgadmin', 'numberOfMedia': '1', 'creationDate': '2018-02-15T02:16:58.300-08:00', 'id': '4b94b67e-c2c6-49ec-b46c-3f35ba45ca4a', 'name': 'cirros034'}, {'isShared': 'true', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'true', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2018-01-26T02:09:12.387-08:00', 'id': 'b139ed82-7ca4-49fb-9882-5f841f59c890', 'name': 'Ubuntu_plugtest-1'}, {'isShared': 'true', 'numberOfVAppTemplates': '1', 'orgName': 'Org2', 'isPublished': 'false', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-06-18T21:33:16.430-07:00', 'id': 'b31e6973-86d2-404b-a522-b16846d099dc', 'name': 'Ubuntu_Cat'}, {'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'orgadmin', 'numberOfMedia': '0', 'creationDate': '2018-02-15T22:26:28.910-08:00', 'id': 'c3b56180-f980-4256-9109-a93168d73ff2', 'name': 'de4ffcf2ad21f1a5d0714d6b868e2645'}, {'isShared': 'false', 'numberOfVAppTemplates': '0', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-08-23T05:54:56.780-07:00', 'id': 'd0eb0b02-718d-42e0-b889-56575000b52d', 'name': 'Test_Cirros'}, {'isShared': 'false', 'numberOfVAppTemplates': '0', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-03-08T21:25:05.923-08:00', 'id': 'd3fa3df2-b311-4571-9138-4c66541d7f46', 'name': 'cirros_10'}, {'isShared': 'false', 'numberOfVAppTemplates': '0', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'system', 
'numberOfMedia': '0', 'creationDate': '2017-07-12T22:45:20.537-07:00', 'id': 'd64b2617-ea4b-4b90-910b-102c99dd2031', 'name': 'Ubuntu16'}, {'isShared': 'true', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'true', 'ownerName': 'system', 'numberOfMedia': '1', 'creationDate': '2017-10-14T23:52:37.260-07:00', 'id': 'e8d953db-8dc9-46d5-9cab-329774cd2ad9', 'name': 'Ubuntu_no_nic'}]
+
+        result = self.vim.get_image_list({'id': '4b94b67e-c2c6-49ec-b46c-3f35ba45ca4a'})
+
+        # assert verified expected and return result from VIM connector
+        for item in result:
+            self.assertEqual(item['id'], '4b94b67e-c2c6-49ec-b46c-3f35ba45ca4a')
+
+    @mock.patch.object(vimconnector,'get_vapp_details_rest')
+    @mock.patch.object(vimconnector,'get_vdc_details')
+    def test_get_vminstance(self, get_vdc_details, get_vapp_details_rest):
+        """
+        Testcase to get vminstance by valid vm id
+        """
+        vapp_info = {'status': '4',
+                   'acquireMksTicket': {'href': 'https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/screen/action/acquireMksTicket',
+                   'type': 'application/vnd.vmware.vcloud.mksTicket+xml', 'rel': 'screen:acquireMksTicket'},
+                   'vm_virtual_hardware': {'disk_edit_href': 'https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/disks', 'disk_size': '40960'},
+                   'name': 'Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa',
+                   'created': '2017-09-21T01:15:31.627-07:00',
+                    'IsEnabled': 'true',
+                   'EndAddress': '12.16.24.199',
+                   'interfaces': [{'MACAddress': '00:50:56:01:12:a2',
+                                   'NetworkConnectionIndex': '0',
+                                   'network': 'testing_T6nODiW4-68f68d93-0350-4d86-b40b-6e74dedf994d',
+                                   'IpAddressAllocationMode': 'DHCP',
+                                   'IsConnected': 'true',
+                                   'IpAddress': '12.16.24.200'}],
+                   'ovfDescriptorUploaded': 'true',
+                   'nestedHypervisorEnabled': 'false',
+                   'Gateway': '12.16.24.1',
+                   'acquireTicket': {'href': 'https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/screen/action/acquireTicket',
+                   'rel': 'screen:acquireTicket'},
+                   'vmuuid': '47d12505-5968-4e16-95a7-18743edb0c8b',
+                   'Netmask': '255.255.255.0',
+                   'StartAddress': '12.16.24.100',
+                   'primarynetwork': '0',
+                   'networkname': 'External-Network-1074',
+                   'IsInherited': 'false',
+                   'deployed': 'true'} 
+        # created vdc object
+        vdc_xml_resp = xml_resp.vdc_xml_response
+        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
+        # assumed return value from VIM connector
+        get_vdc_details.return_value = self.org, vdc
+        get_vapp_details_rest.return_value = vapp_info
+
+        result = self.vim.get_vminstance('47d12505-5968-4e16-95a7-18743edb0c8b')
+        # assert verified expected and return result from VIM connector
+        self.assertEqual(result['status'], 'ACTIVE')
+        self.assertEqual(result['hostId'], '47d12505-5968-4e16-95a7-18743edb0c8b')
+
+
+    @mock.patch.object(vimconnector,'connect')
+    @mock.patch.object(vimconnector,'get_namebyvappid')
+    @mock.patch.object(vimconnector,'get_vdc_details')
+    @mock.patch.object(VDC,'get_vapp')
+    @mock.patch.object(VApp,'power_off')
+    @mock.patch.object(VApp,'undeploy')
+    @mock.patch.object(VDC,'delete_vapp')
+    @mock.patch.object(Client,'get_task_monitor')
+    def test_delete_vminstance(self, get_task_monitor, delete_vapp,
+                                               undeploy, poweroff,
+                                         get_vapp, get_vdc_details,
+                                        get_namebyvappid, connect):
+        """
+        Testcase to delete vminstance by valid vm id
+        """
+        vm_id = '4f6a9b49-e92d-4935-87a1-0e4dc9c3a069'
+        vm_name = 'Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa'
+        # created vdc object
+        vdc_xml_resp = xml_resp.vdc_xml_response
+        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
+
+        # assumed return value from VIM connector
+        self.vim.client = self.vim.connect()
+        get_vdc_details.return_value = self.org, vdc
+        get_namebyvappid.return_name = vm_name
+
+        vapp_resp = xml_resp.vapp_xml_response
+        vapp = lxmlElementTree.fromstring(vapp_resp)
+        get_vapp.return_value = vapp
+
+        power_off_resp = xml_resp.poweroff_task_xml
+        power_off = lxmlElementTree.fromstring(power_off_resp)
+        poweroff.return_value = power_off
+
+        status_resp = xml_resp.status_task_xml
+        status = lxmlElementTree.fromstring(status_resp)
+        self.vim.connect.return_value.get_task_monitor.return_value.wait_for_success.return_value = status
+
+        # call to VIM connector method
+        result = self.vim.delete_vminstance(vm_id)
+
+        # assert verified expected and return result from VIM connector
+        self.assertEqual(result, vm_id)
+
+    @mock.patch.object(vimconnector,'get_network_id_by_name')
+    @mock.patch.object(vimconnector,'get_vm_pci_details')
+    @mock.patch.object(VDC,'get_vapp')
+    @mock.patch.object(vimconnector,'connect')
+    @mock.patch.object(vimconnector,'get_namebyvappid')
+    @mock.patch.object(vimconnector,'get_vdc_details')
+    @mock.patch.object(vimconnector,'perform_request')
+    @mock.patch.object(VApp,'get_all_vms')
+    def test_refresh_vms_status(self, get_all_vms, perform_request, get_vdc_details,
+                                                          get_namebyvappid, connect,
+                                                       get_vapp, get_vm_pci_details,
+                                                            get_network_id_by_name):
+        """
+        Testcase to refresh vms status by valid vm id
+        """
+        vm_id = '53a529b2-10d8-4d56-a7ad-8182acdbe71c'
+
+        # created vdc object
+        vdc_xml_resp = xml_resp.vdc_xml_response
+        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
+        # assumed return value from VIM connector
+        self.vim.client = self.vim.connect()
+        get_vdc_details.return_value = self.org, vdc
+
+        get_namebyvappid.return_value = 'Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa'
+        get_vm_pci_details.return_value = {'host_name': 'test-esx-1.corp.local', 'host_ip': '12.19.24.31'}
+        vapp_resp = xml_resp.vapp_xml_response
+        vapp = lxmlElementTree.fromstring(vapp_resp)
+        get_vapp.return_value = vapp
+        get_network_id_by_name.return_value = '47d12505-5968-4e16-95a7-18743edb0c8b'
+
+        vm_resp = xml_resp.vm_xml_response
+        vm_list = lxmlElementTree.fromstring(vm_resp)
+        get_all_vms.return_value = vm_list
+
+        perform_request.return_value.status_code = 200
+        perform_request.return_value.content = vm_resp
+        # call to VIM connector method
+        result = self.vim.refresh_vms_status([vm_id])
+        for attr in result[vm_id]:
+            if attr == 'status':
+                # assert verified expected and return result from VIM connector
+                self.assertEqual(result[vm_id][attr], 'ACTIVE')
+
+    @mock.patch.object(vimconnector,'get_vcd_network')
+    def test_refresh_nets_status(self, get_vcd_network):
+        net_id = 'c2d0f28f-d38b-4588-aecc-88af3d4af58b'
+        network_dict = {'status': '1','isShared': 'false','IpScope': '',
+                        'EndAddress':'12.19.21.15',
+                        'name': 'testing_gwyRXlvWYL1-9ebb6d7b-5c74-472f-be77-963ed050d44d',
+                        'Dns1': '12.19.21.10', 'IpRanges': '',
+                        'Gateway': '12.19.21.23', 'Netmask': '255.255.255.0',
+                        'RetainNetInfoAcrossDeployments': 'false',
+                        'IpScopes': '', 'IsEnabled': 'true', 'DnsSuffix': 'corp.local',
+                        'StartAddress': '12.19.21.11', 'IpRange': '',
+                        'Configuration': '', 'FenceMode': 'bridged',
+                        'IsInherited': 'true', 'uuid': 'c2d0f28f-d38b-4588-aecc-88af3d4af58b'}
+        # assumed return value from VIM connector
+        get_vcd_network.return_value = network_dict
+        result = self.vim.refresh_nets_status([net_id])
+        # assert verified expected and return result from VIM connector
+        for attr in result[net_id]:
+            if attr == 'status':
+                self.assertEqual(result[net_id][attr], 'ACTIVE')
+
+    @mock.patch.object(VDC,'get_vapp')
+    @mock.patch.object(vimconnector,'connect')
+    @mock.patch.object(vimconnector,'get_namebyvappid')
+    @mock.patch.object(vimconnector,'get_vdc_details')
+    def test_action_vminstance(self, get_vdc_details, get_namebyvappid,
+                                                               connect,
+                                                             get_vapp):
+        """
+        Testcase for action vm instance by vm id
+        """
+        task_resp = xml_resp.poweroff_task_xml
+        vm_id = '05e6047b-6938-4275-8940-22d1ea7245b8'
+        # created vdc object
+        vdc_xml_resp = xml_resp.vdc_xml_response
+        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
+        # assumed return value from VIM connector
+        get_vdc_details.return_value = self.org, vdc
+        get_namebyvappid.return_value = 'Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa'
+        self.vim.client = self.vim.connect()
+        power_off_resp = xml_resp.poweroff_task_xml
+        power_off = lxmlElementTree.fromstring(power_off_resp)
+        get_vapp.return_value.undeploy.return_value = power_off
+
+        status_resp = xml_resp.status_task_xml
+        status = lxmlElementTree.fromstring(status_resp)
+        self.vim.connect.return_value.get_task_monitor.return_value.wait_for_success.return_value = status
+
+        # call to VIM connector method
+        result = self.vim.action_vminstance(vm_id,{'shutdown': None})
+
+        # assert verified expected and return result from VIM connector
+        self.assertEqual(result, vm_id)
+
+    @mock.patch.object(vimconnector,'get_org')
+    def test_get_tenant_list(self, get_org):
+        """
+        Test case for get tenant list
+        """
+        org_dict = {'catalogs': {'4c4fdb5d-0c7d-4fee-9efd-cb061f327a01': '80d8488f67ba1de98b7f485fba6abbd2', '1b98ca02-b0a6-4ca7-babe-eadc0ae59677': 'Ubuntu', 'e7f27dfe-14b7-49e1-918e-173bda02683a': '834bdd1f28fd15dcbe830456ec58fbca', '9441ee69-0486-4438-ac62-8d8082c51302': 'centos', 'e660cce0-47a6-4315-a5b9-97a39299a374': 'cirros01', '0fd96c61-c3d1-4abf-9a34-0dff8fb65743': 'cirros034', '1c703be3-9bd2-46a2-854c-3e678d5cdda8': 'Ubuntu_plugtest-1', 'bc4e342b-f84c-41bd-a93a-480f35bacf69': 'Cirros', '8a206fb5-3ef9-4571-9bcc-137615f4d930': '255eb079a62ac155e7f942489f14b0c4'}, 'vdcs': {'e6436c6a-d922-4b39-9c1c-b48e766fce5e': 'osm', '3852f762-18ae-4833-a229-42684b6e7373': 'cloud-1-vdc'}, 'networks': {'e203cacd-9320-4422-9be0-12c7def3ab56': 'testing_lNejr37B-38e4ca67-1e26-486f-ad2f-f14bb099e068', 'a6623349-2bef-4367-9fda-d33f9ab927f8': 'Vlan_3151', 'adf780cb-358c-47c2-858d-ae5778ccaf17': 'testing_xwBultc-99b8a2ae-c091-4dd3-bbf7-762a51612385', '721f9efc-11fe-4c13-936d-252ba0ed93c8': 'testing_tLljy8WB5e-a898cb28-e75b-4867-a22e-f2bad285c144', '1512d97a-929d-4b06-b8af-cf5ac42a2aee': 'Managment', 'd9167301-28af-4b89-b9e0-09f612e962fa': 'testing_prMW1VThk-063cb428-eaee-44b8-9d0d-df5fb77a5b4d', '004ae853-f899-43fd-8981-7513a3b40d6b': 'testing_RTtKVi09rld-fab00b16-7996-49af-8249-369c6bbfa02d'}}
+        tenant_name = 'osm'
+        get_org.return_value = org_dict
+
+        # call to VIM connector method
+        results = self.vim.get_tenant_list({'name' : tenant_name})
+        # assert verified expected and return result from VIM connector
+        for result in results:
+            self.assertEqual(tenant_name,result['name'])
+
+    @mock.patch.object(vimconnector,'get_org')
+    def test_get_tenant_list_negative(self, get_org):
+        """
+        Test case for get tenant list negative
+        """
+        org_dict = {'vdcs': {}}
+        tenant_name = 'testosm'
+        get_org.return_value = org_dict
+
+        # call to VIM connector method
+        results = self.vim.get_tenant_list({'name' : tenant_name})
+        # assert verified expected and return result from VIM connector
+        self.assertEqual(results, [])
+
+    @mock.patch.object(vimconnector,'create_vdc')
+    def test_new_tenant(self, create_vdc):
+        """
+        Test case for create new tenant
+        """
+        tenant_name = 'test'
+        vdc = {'a493aa2c-3104-4d63-969b-fc9e72304c9f': 'https://localhost/api/task/e658d84c-007d-4fd8-9590-3a8f93cc0de4'}
+        create_vdc.return_value = vdc
+
+        # call to VIM connector method
+        result = self.vim.new_tenant(tenant_name)
+        # assert verified expected and return result from VIM connector
+        self.assertEqual('a493aa2c-3104-4d63-969b-fc9e72304c9f', result)
+
+    @mock.patch.object(vimconnector,'create_vdc')
+    def test_new_tenant_negative(self, create_vdc):
+        """
+        Test case for create new tenant failing when create_vdc returns None (negative)
+        """
+        tenant_name = 'test'
+        create_vdc.return_value = None
+
+        # assert verified expected and return result from VIM connector
+        self.assertRaises(vimconnException,self.vim.new_tenant,tenant_name)
+
+    @mock.patch.object(vimconnector,'connect_as_admin')
+    @mock.patch.object(vimconnector,'connect')
+    @mock.patch.object(vimconnector,'perform_request')
+    def test_delete_tenant(self, perform_request, connect, connect_as_admin):
+        """
+        Test case to delete tenant
+        """
+        tenant_id = '753227f5-d6c6-4478-9546-acc5cfff21e9'
+        delete_tenant_resp = xml_resp.delete_tenant
+
+        self.vim.client = self.vim.connect()
+        perform_request.side_effect = [mock.Mock(status_code = 200,
+                                       content = delete_tenant_resp),
+                                       mock.Mock(status_code = 202,
+                                       content = None)
+                                       ]
+
+        # call to VIM connector method
+        result = self.vim.delete_tenant(tenant_id)
+        # assert verified expected and return result from VIM connector
+        self.assertEqual(tenant_id, result)
+
+    @mock.patch.object(vimconnector,'connect_as_admin')
+    @mock.patch.object(vimconnector,'connect')
+    @mock.patch.object(vimconnector,'perform_request')
+    def test_delete_tenant_negative(self, perform_request, connect, connect_as_admin):
+        """
+        Test case to delete tenant with an invalid tenant id (negative)
+        """
+        tenant_id = 'ten45klsjdf'
+
+        self.vim.client = self.vim.connect()
+        perform_request.return_value.status_code = 201
+
+        # assert verified expected and return result from VIM connector
+        self.assertRaises(vimconnNotFoundException,self.vim.delete_tenant,tenant_id)
+
+    @mock.patch.object(vimconnector,'get_vdc_details')
+    @mock.patch.object(Org,'list_catalogs')
+    @mock.patch.object(vimconnector,'get_vcd_network')
+    @mock.patch.object(Org,'get_vdc')
+    @mock.patch.object(Org,'get_catalog_item')
+    @mock.patch.object(vimconnector,'connect')
+    @mock.patch.object(vimconnector,'perform_request')
+    @mock.patch.object(Client,'get_task_monitor')
+    @mock.patch.object(VDC,'get_vapp')
+    @mock.patch.object(vimconnector,'get_network_list')
+    @mock.patch.object(vimconnector,'power_on_vapp')
+    def test_new_vminstance(self, power_on, get_network_list, get_vapp,
+                            get_task_monitor, perform_request, connect,
+                            get_catalog_item, get_vdc, get_vcd_network,
+                                       list_catalogs, get_vdc_details):
+        """
+        Test case for new_vminstance(): successful deployment.
+
+        All vCD interactions are mocked (catalog/template lookup, the REST
+        calls issued while deploying the vApp, the task monitor and the
+        final power-on); the connector is expected to return a non-None
+        result.
+        """
+        image_id = '34925a30-0f4a-4018-9759-0d6799063b51'
+        # pre-populate the connector-level flavor cache so flavor_id resolves
+        vimconnector.flavorlist = {'123347db-536b-4936-8b62-1fcdc721865d': {'vcpus': 1,
+                                                                            'disk': 10,
+                                                                            'ram': 1024}}
+
+        flavor_id = '123347db-536b-4936-8b62-1fcdc721865d'
+        net_list = [{'use': 'bridge', 'name': 'eth0', 'floating_ip': False, 'vpci': '0000:00:11.0', 'port_security': True, 'type': 'virtual', 'net_id': '69c713cb-3eec-452c-9a32-0e95c8ffe567'}]
+
+        cat_list = [{'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-10-15T02:03:59.403-07:00', 'id': '34925a30-0f4a-4018-9759-0d6799063b51', 'name': 'Ubuntu_1nic'}]
+
+        network_dict = {'status': '1', 'isShared': 'false', 'IpScope': '', 'EndAddress': '192.169.241.150', 'name': 'testing_6n5mJwUyx-ad9d62fc-8223-4dbe-88c4-9f16458ebeec', 'Dns1': '192.169.241.102', 'IpRanges': '', 'Gateway': '192.169.241.253', 'Netmask': '255.255.255.0', 'RetainNetInfoAcrossDeployments': 'false', 'IpScopes': '', 'IsEnabled': 'true', 'DnsSuffix': 'corp.local', 'StartAddress': '192.169.241.115', 'IpRange': '', 'Configuration': '', 'FenceMode': 'bridged', 'IsInherited': 'true', 'uuid': '69c713cb-3eec-452c-9a32-0e95c8ffe567'}
+
+        network_list = [{'status': 'ACTIVE', 'name': 'default', 'admin_state_up': True, 'shared': False, 'tenant_id': '2584137f-6541-4c04-a2a2-e56bfca14c69', 'type': 'bridge', 'id': '1fd6421e-929a-4576-bc19-a0c48aea1969'}]
+
+        # created vdc object
+        vdc_xml_resp = xml_resp.vdc_xml_response
+        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
+
+        catalog_list = lxmlElementTree.fromstring(xml_resp.catalog_list_xml)
+        # assumed return value from VIM connector
+        get_vdc_details.return_value = self.org, vdc
+        list_catalogs.return_value = cat_list
+        get_vcd_network.return_value = network_dict
+        get_vdc.return_value = vdc
+        get_catalog_item.return_value = catalog_list
+        self.vim.client = self.vim.connect()
+        # REST responses consumed in order: catalog item (200),
+        # vApp template (200), deploy vApp (201 Created)
+        perform_request.side_effect = [mock.Mock(status_code = 200,
+                                       content = xml_resp.catalogItem_xml),
+                                       mock.Mock(status_code = 200,
+                                       content = xml_resp.vapp_template_xml),
+                                       mock.Mock(status_code = 201,
+                                       content = xml_resp.deployed_vapp_xml)]
+
+        # the task monitor reports the deployment task as successful
+        status_resp = xml_resp.status_task_xml
+        status = lxmlElementTree.fromstring(status_resp)
+        self.vim.connect.return_value.get_task_monitor.return_value.wait_for_success.return_value = status
+        vapp_resp = xml_resp.vapp_xml_response
+        vapp = lxmlElementTree.fromstring(vapp_resp)
+        get_vapp.return_value = vapp
+        get_network_list.return_value = network_list
+        power_on_resp = xml_resp.poweroff_task_xml
+        poweron = lxmlElementTree.fromstring(power_on_resp)
+        power_on.return_value = poweron
+
+        # call to VIM connector method
+        result = self.vim.new_vminstance(name='Test1_vm', image_id=image_id,
+                                                        flavor_id=flavor_id,
+                                                          net_list=net_list)
+        # assert verified expected and return result from VIM connector
+        self.assertIsNotNone(result)
+
+
+    @mock.patch.object(vimconnector,'get_vdc_details')
+    @mock.patch.object(Org,'list_catalogs')
+    @mock.patch.object(vimconnector,'get_vcd_network')
+    @mock.patch.object(Org,'get_vdc')
+    @mock.patch.object(Org,'get_catalog_item')
+    @mock.patch.object(vimconnector,'connect')
+    @mock.patch.object(vimconnector,'perform_request')
+    @mock.patch.object(Client,'get_task_monitor')
+    @mock.patch.object(VDC,'get_vapp')
+    @mock.patch.object(vimconnector,'get_network_list')
+    @mock.patch.object(vimconnector,'power_on_vapp')
+    def test_new_vminstance_negative(self, power_on, get_network_list, get_vapp,
+                            get_task_monitor, perform_request, connect,
+                            get_catalog_item, get_vdc, get_vcd_network,
+                                       list_catalogs, get_vdc_details):
+        """
+        Test case for new_vminstance(): negative scenario.
+
+        The deploy-vApp REST call answers 400 Bad Request, so the connector
+        is expected to raise vimconnUnexpectedResponse.
+        """
+        image_id = '34925a30-0f4a-4018-9759-0d6799063b51'
+        # pre-populate the connector-level flavor cache so flavor_id resolves
+        vimconnector.flavorlist = {'123347db-536b-4936-8b62-1fcdc721865d': {'vcpus': 1,
+                                                                            'disk': 10,
+                                                                            'ram': 1024}}
+        flavor_id = '123347db-536b-4936-8b62-1fcdc721865d'
+        net_list = [{'use': 'bridge', 'name': 'eth0', 'floating_ip': False, 'vpci': '0000:00:11.0', 'port_security': True, 'type': 'virtual', 'net_id': '69c713cb-3eec-452c-9a32-0e95c8ffe567'}]
+
+        cat_list = [{'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-10-15T02:03:59.403-07:00', 'id': '34925a30-0f4a-4018-9759-0d6799063b51', 'name': 'Ubuntu_1nic'}]
+
+        network_dict = {'status': '1', 'isShared': 'false', 'IpScope': '', 'EndAddress': '192.169.241.150', 'name': 'testing_6n5mJwUyx-ad9d62fc-8223-4dbe-88c4-9f16458ebeec', 'Dns1': '192.169.241.102', 'IpRanges': '', 'Gateway': '192.169.241.253', 'Netmask': '255.255.255.0', 'RetainNetInfoAcrossDeployments': 'false', 'IpScopes': '', 'IsEnabled': 'true', 'DnsSuffix': 'corp.local', 'StartAddress': '192.169.241.115', 'IpRange': '', 'Configuration': '', 'FenceMode': 'bridged', 'IsInherited': 'true', 'uuid': '69c713cb-3eec-452c-9a32-0e95c8ffe567'}
+
+        # created vdc object
+        vdc_xml_resp = xml_resp.vdc_xml_response
+        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
+
+        catalog_list = lxmlElementTree.fromstring(xml_resp.catalog_list_xml)
+        # assumed return value from VIM connector
+        get_vdc_details.return_value = self.org, vdc
+        list_catalogs.return_value = cat_list
+        get_vcd_network.return_value = network_dict
+        get_vdc.return_value = vdc
+        get_catalog_item.return_value = catalog_list
+        self.vim.client = self.vim.connect()
+        # first two REST calls succeed; the final deploy call fails with 400
+        perform_request.side_effect = [mock.Mock(status_code = 200,
+                                       content = xml_resp.catalogItem_xml),
+                                       mock.Mock(status_code = 200,
+                                       content = xml_resp.vapp_template_xml),
+                                       mock.Mock(status_code = 400,
+                                       content = "Bad request error")]
+
+        # call to VIM connector method
+        self.assertRaises(vimconnUnexpectedResponse,self.vim.new_vminstance,
+                                                                 name='Test1_vm',
+                                                                 image_id=image_id,
+                                                                 flavor_id=flavor_id,
+                                                                 net_list=net_list)
+
+    @mock.patch.object(vimconnector,'get_catalogid')
+    @mock.patch.object(vimconnector,'upload_vimimage')
+    @mock.patch.object(Org,'create_catalog')
+    @mock.patch.object(Org,'list_catalogs')
+    @mock.patch.object(vimconnector,'get_vdc_details')
+    @mock.patch.object(path,'isfile')
+    @mock.patch.object(os,'access')
+    def test_new_image(self, access, isfile,
+                              get_vdc_details,
+                                list_catalogs,
+                               create_catalog,
+                               upload_vimimage,
+                                get_catalogid):
+        """
+        Test case for create new image
+        """
+        path = '/tmp/cirros/cirros.ovf'
+        cat_list = [{'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-10-15T02:03:59.403-07:00', 'id': '9759-0d6799063b51', 'name': 'cirros'}]
+        # created vdc object
+        vdc_xml_resp = xml_resp.vdc_xml_response
+        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
+
+        catalog = lxmlElementTree.fromstring(xml_resp.catalog1_xml_response)
+
+        # assumed return value from VIM connector
+        isfile.return_value = True
+        access.return_value = True
+        get_vdc_details.return_value = self.org, vdc
+        list_catalogs.return_value = cat_list
+        create_catalog.return_value = catalog
+        upload_vimimage.return_value = True
+        get_catalogid.return_value = '9759-0d6799063b51'
+        result = self.vim.new_image({'name': 'TestImage', 'location' : path})
+
+        # assert verified expected and return result from VIM connector
+        self.assertIsNotNone(result)
+
+    @mock.patch.object(vimconnector,'get_catalogid')
+    @mock.patch.object(vimconnector,'upload_vimimage')
+    @mock.patch.object(Org,'create_catalog')
+    @mock.patch.object(Org,'list_catalogs')
+    @mock.patch.object(vimconnector,'get_vdc_details')
+    def test_new_image_negative(self, get_vdc_details, list_catalogs,
+                                              create_catalog,
+                                              upload_vimimage,
+                                              get_catalogid):
+        """
+        Test case for create new image with negative scenario
+        """
+        path = '/tmp/cirros/cirros.ovf'
+        cat_list = [{'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org1', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-10-15', 'id': '34925a30-0f4a-4018-9759-0d6799063b51', 'name': 'test'}]
+        # created vdc object
+        vdc_xml_resp = xml_resp.vdc_xml_response
+        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
+
+        catalog = lxmlElementTree.fromstring(xml_resp.catalog1_xml_response)
+
+        # assumed return value from VIM connector
+        get_vdc_details.return_value = self.org, vdc
+        list_catalogs.return_value = cat_list
+        create_catalog.return_value = catalog
+        upload_vimimage.return_value = False
+        get_catalogid.return_value = '34925a30-0f4a-4018-9759-0d6799063b51'
+
+        # assert verified expected and return result from VIM connector
+        self.assertRaises(vimconnException,self.vim.new_image,{'name':'TestImage', 'location':path})
+
+    @mock.patch.object(vimconnector,'connect_as_admin')
+    @mock.patch.object(vimconnector,'perform_request')
+    def test_delete_image(self, perform_request, connect_as_admin):
+        """
+        Testcase to delete image by image id
+        """
+        image_id = 'f3bf3733-465b-419f-b675-52f91d18edbb'
+        # creating conn object
+        self.vim.client = self.vim.connect_as_admin()
+
+        # assumed return value from VIM connector
+        perform_request.side_effect = [mock.Mock(status_code = 200,
+                                       content = xml_resp.delete_catalog_xml_response),
+                                       mock.Mock(status_code = 200,
+                                       content = xml_resp.delete_catalog_item_xml_response),
+                                       mock.Mock(status_code = 204,
+                                       content = ''),
+                                       mock.Mock(status_code = 204,
+                                       content = '')
+                                       ]
+
+        # call to vim connctor method
+        result = self.vim.delete_image(image_id)
+        # assert verified expected and return result from VIM connector
+        self.assertEqual(image_id, result)
+
+    @mock.patch.object(vimconnector,'get_catalogid')
+    @mock.patch.object(vimconnector,'upload_vimimage')
+    @mock.patch.object(Org,'create_catalog')
+    @mock.patch.object(Org,'list_catalogs')
+    @mock.patch.object(vimconnector,'get_vdc_details')
+    @mock.patch.object(path,'isfile')
+    @mock.patch.object(os,'access')
+    def test_get_image_id_from_path(self, access, isfile,
+                                              get_vdc_details,
+                                              list_catalogs,
+                                              create_catalog,
+                                              upload_vimimage,
+                                              get_catalogid):
+        """
+        Test case to get image id from image path
+        """
+        path = '/tmp/ubuntu/ubuntu.ovf'
+        cat_list = [{'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-10-15T02:03:59.403-07:00', 'id': '7208-0f6777052c30', 'name': 'ubuntu'}]
+
+        # created vdc object
+        vdc_xml_resp = xml_resp.vdc_xml_response
+        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
+
+        catalog = lxmlElementTree.fromstring(xml_resp.catalog1_xml_response)
+
+        # assumed return value from VIM connector
+        isfile.return_value = True
+        access.return_value = True
+        get_vdc_details.return_value = self.org, vdc
+        list_catalogs.return_value = cat_list
+        create_catalog.return_value = catalog
+        upload_vimimage.return_value = True
+        get_catalogid.return_value = '7208-0f6777052c30'
+        result = self.vim.get_image_id_from_path(path=path)
+
+        # assert verified expected and return result from VIM connector
+        self.assertIsNotNone(result)
+
+    @mock.patch.object(vimconnector,'get_catalogid')
+    @mock.patch.object(vimconnector,'upload_vimimage')
+    @mock.patch.object(Org,'create_catalog')
+    @mock.patch.object(Org,'list_catalogs')
+    @mock.patch.object(vimconnector,'get_vdc_details')
+    @mock.patch.object(path,'isfile')
+    @mock.patch.object(os,'access')
+    def test_get_image_id_from_path_negative(self, access, isfile,
+                                              get_vdc_details,
+                                              list_catalogs,
+                                              create_catalog,
+                                              upload_vimimage,
+                                              get_catalogid):
+        """
+        Test case to get image id from image path with negative scenario
+        """
+        path = '/tmp/ubuntu/ubuntu.ovf'
+        cat_list = [{'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-10-15T02:03:59.403-07:00', 'id': '7208-0f6777052c30', 'name': 'ubuntu'}]
+
+        # created vdc object
+        vdc_xml_resp = xml_resp.vdc_xml_response
+        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
+
+        catalog = lxmlElementTree.fromstring(xml_resp.catalog1_xml_response)
+
+        # assumed return value from VIM connector
+        isfile.return_value = True
+        access.return_value = True
+        get_vdc_details.return_value = self.org, vdc
+        list_catalogs.return_value = cat_list
+        create_catalog.return_value = catalog
+        upload_vimimage.return_value = False
+        get_catalogid.return_value = '7208-0f6777052c30'
+        self.assertRaises(vimconnException, self.vim.get_image_id_from_path, path)
+
+    @mock.patch.object(vimconnector,'get_vdc_details')
+    @mock.patch.object(vimconnector,'connect')
+    @mock.patch.object(Org,'list_catalogs')
+    def test_get_image_list_negative(self, list_catalogs, connect, get_vdc_details):
+        """
+        Testcase to get image list by invalid image id
+
+        NOTE(review): this test only checks that the call completes without
+        raising - there is no assertion on the returned value.  Presumably
+        an unknown id yields an empty list; confirm and assert it.
+        """
+        # created vdc object
+        vdc_xml_resp = xml_resp.vdc_xml_response
+        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
+        self.vim.client = self.vim.connect()
+
+        # assumed return value from VIM connector: two catalogs, neither of
+        # which matches the id queried below
+        get_vdc_details.return_value = self.org, vdc
+        list_catalogs.return_value = [{'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-10-15T02:03:59.403-07:00', 'id': '34925a30-0f4a-4018-9759-0d6799063b51', 'name': 'Ubuntu_1nic'}, {'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'orgadmin', 'numberOfMedia': '1', 'creationDate': '2018-02-15T02:16:58.300-08:00', 'id': '4b94b67e-c2c6-49ec-b46c-3f35ba45ca4a', 'name': 'cirros034'}]
+
+        # call to vim connector method with invalid image id
+        self.vim.get_image_list({'id': 'b46c-3f35ba45ca4a'})
+
+    @mock.patch.object(vimconnector,'get_vapp_details_rest')
+    @mock.patch.object(vimconnector,'get_vdc_details')
+    def test_get_vminstance_negative(self, get_vdc_details, get_vapp_details_rest):
+        """
+        Testcase to get vminstance by invalid vm id
+        """
+
+        invalid_vmid = '18743edb0c8b-sdfsf-fg'
+        # created vdc object
+        vdc_xml_resp = xml_resp.vdc_xml_response
+        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
+        # assumed return value from VIM connector
+        get_vdc_details.return_value = self.org, vdc
+        get_vapp_details_rest.return_value = False
+
+        # assert verified expected and return result from VIM connector
+        self.assertRaises(vimconnNotFoundException, self.vim.get_vminstance,invalid_vmid)
+
+    @mock.patch.object(vimconnector,'connect')
+    @mock.patch.object(vimconnector,'get_namebyvappid')
+    @mock.patch.object(vimconnector,'get_vdc_details')
+    @mock.patch.object(VDC,'get_vapp')
+    def test_delete_vminstance_negative(self, get_vapp, get_vdc_details,
+                                             get_namebyvappid, connect):
+        """
+        Testcase to delete vminstance by invalid vm id
+        """
+        vm_id = 'sdfrtt4935-87a1-0e4dc9c3a069'
+        vm_name = 'Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa'
+        # created vdc object
+        vdc_xml_resp = xml_resp.vdc_xml_response
+        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
+
+        # assumed return value from VIM connector
+        self.vim.client = self.vim.connect()
+        get_vdc_details.return_value = self.org, vdc
+        get_namebyvappid.return_name = vm_name
+
+        get_vapp.return_value = None
+
+        # call to VIM connector method
+        self.assertRaises(vimconnException, self.vim.delete_vminstance,vm_id)
+
+    @mock.patch.object(vimconnector,'get_vcd_network')
+    def test_refresh_nets_status_negative(self, get_vcd_network):
+        """
+        Testcase for refresh nets status by invalid vm id
+        """
+        net_id = 'sjkldf-456mfd-345'
+
+        # assumed return value from VIM connector
+        get_vcd_network.return_value = None
+        result = self.vim.refresh_nets_status([net_id])
+
+        # assert verified expected and return result from VIM connector
+        for attr in result[net_id]:
+            if attr == 'status':
+                self.assertEqual(result[net_id][attr], 'DELETED')
+
+    @mock.patch.object(vimconnector,'connect')
+    @mock.patch.object(vimconnector,'get_namebyvappid')
+    @mock.patch.object(vimconnector,'get_vdc_details')
+    def test_action_vminstance_negative(self, get_vdc_details,
+                                             get_namebyvappid,
+                                                     connect):
+        """
+        Testcase for action vm instance by invalid action
+        """
+        vm_id = '8413-4cb8-bad7-b5afaec6f9fa'
+        # created vdc object
+        vdc_xml_resp = xml_resp.vdc_xml_response
+        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
+        # assumed return value from VIM connector
+        get_vdc_details.return_value = self.org, vdc
+        get_namebyvappid.return_value = 'Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa'
+        self.vim.client = self.vim.connect()
+
+        # call to VIM connector method
+        self.assertRaises(vimconnException, self.vim.action_vminstance, vm_id,{'invalid': None})
diff --git a/RO-VIM-vmware/osm_rovim_vmware/tests/test_vimconn_vmware_xml_response.py b/RO-VIM-vmware/osm_rovim_vmware/tests/test_vimconn_vmware_xml_response.py
new file mode 100644 (file)
index 0000000..968cb1f
--- /dev/null
@@ -0,0 +1,637 @@
+# -*- coding: utf-8 -*-\r
+\r
+##\r
+# Copyright 2016-2017 VMware Inc.\r
+# This file is part of ETSI OSM\r
+# All Rights Reserved.\r
+#\r
+# Licensed under the Apache License, Version 2.0 (the "License"); you may\r
+# not use this file except in compliance with the License. You may obtain\r
+# a copy of the License at\r
+#\r
+#         http://www.apache.org/licenses/LICENSE-2.0\r
+#\r
+# Unless required by applicable law or agreed to in writing, software\r
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT\r
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\r
+# License for the specific language governing permissions and limitations\r
+# under the License.\r
+#\r
+# For those usages not covered by the Apache License, Version 2.0 please\r
+# contact:  osslegalrouting@vmware.com\r
+##\r
+\r
+vdc_xml_response = """<?xml version="1.0" encoding="UTF-8"?>\r
+        <Vdc xmlns="http://www.vmware.com/vcloud/v1.5" status="1" name="Org3-VDC-PVDC1" id="urn:vcloud:vdc:2584137f-6541-4c04-a2a2-e56bfca14c69" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69" type="application/vnd.vmware.vcloud.vdc+xml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">\r
+               <Link rel="up" href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac" type="application/vnd.vmware.vcloud.org+xml"/>\r
+               <Link rel="down" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>\r
+               <Link rel="edit" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69" type="application/vnd.vmware.vcloud.vdc+xml"/>\r
+               <Link rel="add" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/action/uploadVAppTemplate" type="application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml"/>\r
+               <Link rel="add" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/media" type="application/vnd.vmware.vcloud.media+xml"/>\r
+               <Link rel="add" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/action/instantiateOvf" type="application/vnd.vmware.vcloud.instantiateOvfParams+xml"/>\r
+               <Link rel="add" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/action/instantiateVAppTemplate" type="application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml"/>\r
+               <Link rel="add" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/action/cloneVApp" type="application/vnd.vmware.vcloud.cloneVAppParams+xml"/>\r
+               <Link rel="add" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/action/cloneVAppTemplate" type="application/vnd.vmware.vcloud.cloneVAppTemplateParams+xml"/>\r
+               <Link rel="add" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/action/cloneMedia" type="application/vnd.vmware.vcloud.cloneMediaParams+xml"/>\r
+               <Link rel="add" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/action/captureVApp" type="application/vnd.vmware.vcloud.captureVAppParams+xml"/>\r
+               <Link rel="add" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/action/composeVApp" type="application/vnd.vmware.vcloud.composeVAppParams+xml"/>\r
+               <Link rel="add" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/disk" type="application/vnd.vmware.vcloud.diskCreateParams+xml"/>\r
+               <Link rel="edgeGateways" href="https://localhost/api/admin/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/edgeGateways" type="application/vnd.vmware.vcloud.query.records+xml"/>\r
+               <Link rel="add" href="https://localhost/api/admin/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/networks" type="application/vnd.vmware.vcloud.orgVdcNetwork+xml"/>\r
+               <Link rel="orgVdcNetworks" href="https://localhost/api/admin/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/networks" type="application/vnd.vmware.vcloud.query.records+xml"/>\r
+               <Link rel="alternate" href="https://localhost/api/admin/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69" type="application/vnd.vmware.admin.vdc+xml"/>\r
+               <Description>Org3-VDC-PVDC1</Description>\r
+               <AllocationModel>AllocationVApp</AllocationModel>\r
+               <ComputeCapacity>\r
+               <Cpu>\r
+               <Units>MHz</Units>\r
+               <Allocated>0</Allocated>\r
+               <Limit>0</Limit>\r
+               <Reserved>0</Reserved>\r
+               <Used>2000</Used>\r
+               <Overhead>0</Overhead>\r
+               </Cpu>\r
+               <Memory>\r
+               <Units>MB</Units>\r
+               <Allocated>0</Allocated>\r
+               <Limit>0</Limit>\r
+               <Reserved>0</Reserved>\r
+               <Used>2048</Used>\r
+               <Overhead>71</Overhead>\r
+               </Memory>\r
+               </ComputeCapacity>\r
+               <ResourceEntities>\r
+               <ResourceEntity href="https://localhost/api/vAppTemplate/vappTemplate-2999a787-ca96-4d1c-8b7c-9d0a8bd14bce" name="cirros" type="application/vnd.vmware.vcloud.vAppTemplate+xml"/>\r
+        <ResourceEntity href="https://localhost/api/vAppTemplate/vappTemplate-324649a3-d263-4446-aace-4e2c801a85bd" name="cirros_10" type="application/vnd.vmware.vcloud.vAppTemplate+xml"/>\r
+               <ResourceEntity href="https://localhost/api/vAppTemplate/vappTemplate-8ea35d43-0c72-4267-bac9-42e4a5248c32" name="Test_Cirros" type="application/vnd.vmware.vcloud.vAppTemplate+xml"/>\r
+               <ResourceEntity href="https://localhost/api/vAppTemplate/vappTemplate-9bf292a2-58c4-4d4b-995b-623e88b74226" name="Ubuntu-vm" type="application/vnd.vmware.vcloud.vAppTemplate+xml"/>\r
+               <ResourceEntity href="https://localhost/api/vAppTemplate/vappTemplate-be93140e-da0d-4b8c-8ab4-06d132bf47c0" name="Ubuntu16" type="application/vnd.vmware.vcloud.vAppTemplate+xml"/>\r
+               <ResourceEntity href="https://localhost/api/vApp/vapp-0da5344d-4d65-4362-bac6-e8524c97edb1" name="Inst10.linux1.a-e9f75c31-eadf-4b48-9a5e-d957314530d7" type="application/vnd.vmware.vcloud.vApp+xml"/>\r
+               <ResourceEntity href="https://localhost/api/vApp/vapp-3e0df975-1380-4544-9f25-0683f9eb41f0" name="Inst12.linux1.a-93854e6d-d87c-4f0a-ba10-eaf59d7555bf" type="application/vnd.vmware.vcloud.vApp+xml"/>\r
+               <ResourceEntity href="https://localhost/api/vApp/vapp-6f5848b8-5498-4854-a35e-45cb25b8fdb0" name="Inst11.linux1.a-5ca666e8-e077-4268-aff2-99960af28eb5" type="application/vnd.vmware.vcloud.vApp+xml"/>\r
+               <ResourceEntity href="https://localhost/api/vApp/vapp-76510a06-c949-4bea-baad-629daaccb84a" name="cirros_nsd.cirros_vnfd__1.a-a9c957c4-29a5-4559-a630-00ae028592f7" type="application/vnd.vmware.vcloud.vApp+xml"/>\r
+               </ResourceEntities><AvailableNetworks><Network href="https://localhost/api/network/1627b438-68bf-44be-800c-8f48029761f6" name="default-17c27654-2a45-4713-a799-94cb91de2610" type="application/vnd.vmware.vcloud.network+xml"/>\r
+               <Network href="https://localhost/api/network/190e9e04-a904-412b-877e-92d8e8699abd" name="cirros_nsd.cirros_nsd_vld1-86c861a9-d985-4e31-9c20-21de1e8a619d" type="application/vnd.vmware.vcloud.network+xml"/>\r
+               <Network href="https://localhost/api/network/3838c23e-cb0e-492f-a91f-f3352918ff8b" name="cirros_nsd.cirros_nsd_vld1-75ce0375-b2e6-4b7f-b821-5b395276bcd8" type="application/vnd.vmware.vcloud.network+xml"/>\r
+               <Network href="https://localhost/api/network/5aca5c32-c0a2-4e1b-980e-8fd906a49f4e" name="default-60a54140-66dd-4806-8ca3-069d34530478" type="application/vnd.vmware.vcloud.network+xml"/>\r
+               <Network href="https://localhost/api/network/de854aa2-0b77-4ace-a696-85494a3dc3c4" name="default-971acee6-0298-4085-b107-7601bc8c8712" type="application/vnd.vmware.vcloud.network+xml"/>\r
+               </AvailableNetworks>\r
+               <Capabilities>\r
+               <SupportedHardwareVersions>\r
+               <SupportedHardwareVersion>vmx-04</SupportedHardwareVersion>\r
+               <SupportedHardwareVersion>vmx-07</SupportedHardwareVersion>\r
+               <SupportedHardwareVersion>vmx-08</SupportedHardwareVersion>\r
+               <SupportedHardwareVersion>vmx-09</SupportedHardwareVersion>\r
+               <SupportedHardwareVersion>vmx-10</SupportedHardwareVersion>\r
+               <SupportedHardwareVersion>vmx-11</SupportedHardwareVersion>\r
+               </SupportedHardwareVersions>\r
+               </Capabilities>\r
+               <NicQuota>0</NicQuota>\r
+               <NetworkQuota>1000</NetworkQuota>\r
+               <UsedNetworkCount>0</UsedNetworkCount>\r
+               <VmQuota>0</VmQuota>\r
+               <IsEnabled>true</IsEnabled>\r
+               <VdcStorageProfiles>\r
+               <VdcStorageProfile href="https://localhost/api/vdcStorageProfile/3b82941c-11ed-407e-ada0-42d282fcd425" name="NFS Storage Policy" type="application/vnd.vmware.vcloud.vdcStorageProfile+xml"/>\r
+               <VdcStorageProfile href="https://localhost/api/vdcStorageProfile/950701fb-2b8a-4808-80f1-27d1170a2bfc" name="*" type="application/vnd.vmware.vcloud.vdcStorageProfile+xml"/>\r
+               </VdcStorageProfiles>\r
+        <VCpuInMhz2>1000</VCpuInMhz2>\r
+        </Vdc>"""\r
+\r
+network_xml_response = """<?xml version="1.0" encoding="UTF-8"?>\r
+             <OrgVdcNetwork xmlns="http://www.vmware.com/vcloud/v1.5" status="1" name="testing_6XXftDTroat1-03b18565-de01-4154-af51-8dbea42f0d84" id="urn:vcloud:network:5c04dc6d-6096-47c6-b72b-68f19013d491" href="https://localhost/api/network/5c04dc6d-6096-47c6-b72b-68f19013d491" type="application/vnd.vmware.vcloud.orgVdcNetwork+xml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">\r
+             <Link rel="up" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69" type="application/vnd.vmware.vcloud.vdc+xml"/>\r
+             <Link rel="down" href="https://localhost/api/network/5c04dc6d-6096-47c6-b72b-68f19013d491/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>\r
+             <Link rel="down" href="https://localhost/api/network/5c04dc6d-6096-47c6-b72b-68f19013d491/allocatedAddresses/" type="application/vnd.vmware.vcloud.allocatedNetworkAddress+xml"/>\r
+             <Description>Openmano created</Description>\r
+             <Configuration>\r
+             <IpScopes>\r
+             <IpScope>\r
+             <IsInherited>true</IsInherited>\r
+             <Gateway>12.169.24.23</Gateway>\r
+             <Netmask>255.255.255.0</Netmask>\r
+             <Dns1>12.169.24.102</Dns1>\r
+             <DnsSuffix>corp.local</DnsSuffix>\r
+             <IsEnabled>true</IsEnabled>\r
+             <IpRanges>\r
+             <IpRange>\r
+             <StartAddress>12.169.24.115</StartAddress>\r
+             <EndAddress>12.169.241.150</EndAddress>\r
+             </IpRange>\r
+             </IpRanges>\r
+             </IpScope>\r
+             </IpScopes>\r
+             <FenceMode>bridged</FenceMode>\r
+             <RetainNetInfoAcrossDeployments>false</RetainNetInfoAcrossDeployments>\r
+             </Configuration>\r
+             <IsShared>false</IsShared>\r
+             </OrgVdcNetwork>"""\r
+\r
+delete_network_xml_response = """<?xml version="1.0" encoding="UTF-8"?>\r
+            <OrgVdcNetwork xmlns="http://www.vmware.com/vcloud/v1.5" status="1" name="testing_negjXxdlB-7fdcf9f3-de32-4ae6-b9f9-fb725a80a74f" id="urn:vcloud:network:0a55e5d1-43a2-4688-bc92-cb304046bf87" href="https://localhost/api/network/0a55e5d1-43a2-4688-bc92-cb304046bf87" type="application/vnd.vmware.vcloud.orgVdcNetwork+xml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">\r
+                       <Link rel="up" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69" type="application/vnd.vmware.vcloud.vdc+xml"/>\r
+                       <Link rel="down" href="https://localhost/api/network/0a55e5d1-43a2-4688-bc92-cb304046bf87/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>\r
+                       <Link rel="down" href="https://localhost/api/network/0a55e5d1-43a2-4688-bc92-cb304046bf87/allocatedAddresses/"  type="application/vnd.vmware.vcloud.allocatedNetworkAddress+xml"/>\r
+                       <Description>Openmano created</Description>\r
+                       <Configuration>\r
+                       <IpScopes>\r
+                       <IpScope>\r
+                       <IsInherited>true</IsInherited>\r
+                       <Gateway>12.169.24.23</Gateway>\r
+                       <Netmask>255.255.255.0</Netmask>\r
+                       <Dns1>12.169.24.102</Dns1>\r
+                       <DnsSuffix>corp.local</DnsSuffix>\r
+                       <IsEnabled>true</IsEnabled>\r
+                       <IpRanges>\r
+                       <IpRange>\r
+                       <StartAddress>12.169.241.115</StartAddress>\r
+                       <EndAddress>12.169.241.150</EndAddress>\r
+                       </IpRange></IpRanges></IpScope>\r
+                       </IpScopes>\r
+                       <FenceMode>bridged</FenceMode>\r
+                       <RetainNetInfoAcrossDeployments>false</RetainNetInfoAcrossDeployments>\r
+                       </Configuration>\r
+                       <IsShared>false</IsShared>\r
+                       </OrgVdcNetwork>"""\r
+\r
+create_network_xml_response = """<?xml version="1.0" encoding="UTF-8"?>\r
+            <OrgVdcNetwork xmlns="http://www.vmware.com/vcloud/v1.5" name="Test_network-25cb63aa-30e9-4de5-be76-1d6e00a2781a" id="urn:vcloud:network:df1956fa-da04-419e-a6a2-427b6f83788f" href="https://localhost/api/admin/network/df1956fa-da04-419e-a6a2-427b6f83788f" type="application/vnd.vmware.vcloud.orgVdcNetwork+xml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">\r
+            <Link rel="edit" href="https://localhost/api/admin/network/df1956fa-da04-419e-a6a2-427b6f83788f" type="application/vnd.vmware.vcloud.orgVdcNetwork+xml"/>\r
+            <Link rel="remove" href="https://localhost/api/admin/network/df1956fa-da04-419e-a6a2-427b6f83788f"/>\r
+            <Link rel="repair" href="https://localhost/api/admin/network/df1956fa-da04-419e-a6a2-427b6f83788f/action/reset"/>\r
+            <Link rel="up" href="https://localhost/api/admin/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69" type="application/vnd.vmware.admin.vdc+xml"/>\r
+            <Link rel="down" href="https://localhost/api/admin/network/df1956fa-da04-419e-a6a2-427b6f83788f/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>\r
+            <Link rel="down" href="https://localhost/api/admin/network/df1956fa-da04-419e-a6a2-427b6f83788f/allocatedAddresses/" type="application/vnd.vmware.vcloud.allocatedNetworkAddress+xml"/>\r
+            <Description>Openmano created</Description>\r
+            <Tasks>\r
+                  <Task cancelRequested="false" expiryTime="2017-12-14T02:00:39.865-08:00" operation="Creating Network Test_network-25cb63aa-30e9-4de5-be76-1d6e00a2781a(df1956fa-da04-419e-a6a2-427b6f83788f)" operationName="networkCreateOrgVdcNetwork" serviceNamespace="com.vmware.vcloud" startTime="2017-09-15T02:00:39.865-07:00" status="queued" name="task" id="urn:vcloud:task:0600f592-42ce-4d58-85c0-212c569ba6e6" href="https://localhost/api/task/0600f592-42ce-4d58-85c0-212c569ba6e6" type="application/vnd.vmware.vcloud.task+xml">\r
+                  <Owner href="https://localhost/api/admin/network/df1956fa-da04-419e-a6a2-427b6f83788f" name="Test_network-25cb63aa-30e9-4de5-be76-1d6e00a2781a" type="application/vnd.vmware.admin.network+xml"/>\r
+                  <User href="https://localhost/api/admin/user/f49f28e0-7172-4b17-aaee-d171ce2b60da" name="administrator" type="application/vnd.vmware.admin.user+xml"/>\r
+                  <Organization href="https://localhost/api/org/a93c9db9-7471-3192-8d09-a8f7eeda85f9" name="System" type="application/vnd.vmware.vcloud.org+xml"/>\r
+                  <Details/>\r
+                  </Task>\r
+            </Tasks>\r
+            <Configuration>\r
+            <IpScopes><IpScope>\r
+            <IsInherited>false</IsInherited>\r
+            <Gateway>12.16.113.1</Gateway>\r
+            <Netmask>255.255.255.0</Netmask>\r
+            <Dns1>12.16.113.2</Dns1>\r
+            <IsEnabled>true</IsEnabled>\r
+            <IpRanges><IpRange>\r
+            <StartAddress>12.168.113.3</StartAddress>\r
+            <EndAddress>12.168.113.52</EndAddress>\r
+            </IpRange></IpRanges>\r
+            </IpScope></IpScopes>\r
+            <ParentNetwork href="https://localhost/api/admin/network/19b01b42-c862-4d0f-bcbf-d053e7396fc0" name="" type="application/vnd.vmware.admin.network+xml"/>\r
+            <FenceMode>bridged</FenceMode>\r
+            <RetainNetInfoAcrossDeployments>false</RetainNetInfoAcrossDeployments>\r
+            </Configuration><IsShared>false</IsShared>\r
+            </OrgVdcNetwork>"""\r
+\r
+catalog1_xml_response = """<?xml version="1.0" encoding="UTF-8"?>\r
+<Catalog xmlns="http://www.vmware.com/vcloud/v1.5" name="Ubuntu-vm" id="urn:vcloud:catalog:d0a11b12-780e-4681-babb-2b1fd6693f62" href="https://localhost/api/catalog/d0a11b12-780e-4681-babb-2b1fd6693f62" type="application/vnd.vmware.vcloud.catalog+xml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">\r
+<Link rel="up" href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac" type="application/vnd.vmware.vcloud.org+xml"/>\r
+<Link rel="down" href="https://localhost/api/catalog/d0a11b12-780e-4681-babb-2b1fd6693f62/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>\r
+<Link rel="add" href="https://localhost/api/catalog/d0a11b12-780e-4681-babb-2b1fd6693f62/catalogItems" type="application/vnd.vmware.vcloud.catalogItem+xml"/>\r
+<Link rel="add" href="https://localhost/api/catalog/d0a11b12-780e-4681-babb-2b1fd6693f62/action/upload" type="application/vnd.vmware.vcloud.media+xml"/>\r
+<Link rel="add" href="https://localhost/api/catalog/d0a11b12-780e-4681-babb-2b1fd6693f62/action/upload" type="application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml"/>\r
+<Link rel="copy" href="https://localhost/api/catalog/d0a11b12-780e-4681-babb-2b1fd6693f62/action/copy" type="application/vnd.vmware.vcloud.copyOrMoveCatalogItemParams+xml"/>\r
+<Link rel="move" href="https://localhost/api/catalog/d0a11b12-780e-4681-babb-2b1fd6693f62/action/move" type="application/vnd.vmware.vcloud.copyOrMoveCatalogItemParams+xml"/>\r
+<Link rel="add" href="https://localhost/api/catalog/d0a11b12-780e-4681-babb-2b1fd6693f62/action/captureVApp" type="application/vnd.vmware.vcloud.captureVAppParams+xml"/>\r
+<Link rel="down" href="https://localhost/api/catalog/d0a11b12-780e-4681-babb-2b1fd6693f62/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
+<Link rel="controlAccess" href="https://localhost/api/catalog/d0a11b12-780e-4681-babb-2b1fd6693f62/action/controlAccess" type="application/vnd.vmware.vcloud.controlAccess+xml"/> <Description>Ubuntu-vm</Description>\r
+<CatalogItems><CatalogItem href="https://localhost/api/catalogItem/04fc0041-8e40-4e37-b072-7dba3e1c6a30" id="04fc0041-8e40-4e37-b072-7dba3e1c6a30" name="Ubuntu-vm" type="application/vnd.vmware.vcloud.catalogItem+xml"/></CatalogItems><IsPublished>false</IsPublished><DateCreated>2017-03-17T03:17:11.293-07:00</DateCreated><VersionNumber>5</VersionNumber>\r
+</Catalog>"""\r
+\r
+catalog2_xml_response = """<?xml version="1.0" encoding="UTF-8"?>\r
+<Catalog xmlns="http://www.vmware.com/vcloud/v1.5" name="cirros" id="urn:vcloud:catalog:32ccb082-4a65-41f6-bcd6-38942e8a3829" href="https://localhost/api/catalog/32ccb082-4a65-41f6-bcd6-38942e8a3829" type="application/vnd.vmware.vcloud.catalog+xml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">\r
+<Link rel="up" href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac" type="application/vnd.vmware.vcloud.org+xml"/>\r
+<Link rel="down" href="https://localhost/api/catalog/32ccb082-4a65-41f6-bcd6-38942e8a3829/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>\r
+<Link rel="add" href="https://localhost/api/catalog/32ccb082-4a65-41f6-bcd6-38942e8a3829/catalogItems" type="application/vnd.vmware.vcloud.catalogItem+xml"/>\r
+<Link rel="add" href="https://localhost/api/catalog/32ccb082-4a65-41f6-bcd6-38942e8a3829/action/upload" type="application/vnd.vmware.vcloud.media+xml"/>\r
+<Link rel="add" href="https://localhost/api/catalog/32ccb082-4a65-41f6-bcd6-38942e8a3829/action/upload" type="application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml"/>\r
+<Link rel="copy" href="https://localhost/api/catalog/32ccb082-4a65-41f6-bcd6-38942e8a3829/action/copy" type="application/vnd.vmware.vcloud.copyOrMoveCatalogItemParams+xml"/>\r
+<Link rel="move" href="https://localhost/api/catalog/32ccb082-4a65-41f6-bcd6-38942e8a3829/action/move" type="application/vnd.vmware.vcloud.copyOrMoveCatalogItemParams+xml"/>\r
+<Link rel="add" href="https://localhost/api/catalog/32ccb082-4a65-41f6-bcd6-38942e8a3829/action/captureVApp" type="application/vnd.vmware.vcloud.captureVAppParams+xml"/>\r
+<Link rel="down" href="https://localhost/api/catalog/32ccb082-4a65-41f6-bcd6-38942e8a3829/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
+<Link rel="controlAccess" href="https://localhost/api/catalog/32ccb082-4a65-41f6-bcd6-38942e8a3829/action/controlAccess" type="application/vnd.vmware.vcloud.controlAccess+xml"/> <Description>cirros</Description>\r
+<CatalogItems><CatalogItem href="https://localhost/api/catalogItem/98316d41-e38c-40c2-ac28-5462e8aada8c" id="98316d41-e38c-40c2-ac28-5462e8aada8c" name="cirros" type="application/vnd.vmware.vcloud.catalogItem+xml"/></CatalogItems><IsPublished>false</IsPublished><DateCreated>2017-03-08T02:06:07.003-08:00</DateCreated><VersionNumber>5</VersionNumber>\r
+</Catalog>"""\r
+\r
+vapp_xml_response = """<?xml version="1.0" encoding="UTF-8"?>\r
+<VApp xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1" xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" xmlns:vmw="http://www.vmware.com/schema/ovf" xmlns:ovfenv="http://schemas.dmtf.org/ovf/environment/1" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" ovfDescriptorUploaded="true" deployed="true" status="4" name="Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa" id="urn:vcloud:vapp:4f6a9b49-e92d-4935-87a1-0e4dc9c3a069" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069" type="application/vnd.vmware.vcloud.vApp+xml" xsi:schemaLocation="http://schemas.dmtf.org/ovf/envelope/1 http://schemas.dmtf.org/ovf/envelope/1/dsp8023_1.1.0.xsd http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd http://www.vmware.com/schema/ovf http://www.vmware.com/schema/ovf http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2.22.0/CIM_ResourceAllocationSettingData.xsd http://schemas.dmtf.org/ovf/environment/1 http://schemas.dmtf.org/ovf/envelope/1/dsp8027_1.1.0.xsd http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2.22.0/CIM_VirtualSystemSettingData.xsd">\r
+<Link rel="power:powerOff" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/power/action/powerOff"/>\r
+<Link rel="power:reboot" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/power/action/reboot"/>\r
+<Link rel="power:reset" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/power/action/reset"/>\r
+<Link rel="power:shutdown" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/power/action/shutdown"/>\r
+<Link rel="power:suspend" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/power/action/suspend"/>\r
+<Link rel="deploy" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/action/deploy" type="application/vnd.vmware.vcloud.deployVAppParams+xml"/>\r
+<Link rel="undeploy" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/action/undeploy" type="application/vnd.vmware.vcloud.undeployVAppParams+xml"/>\r
+<Link rel="down" href="https://localhost/api/network/9489a59a-0339-4151-9667-f5b90296c36d" name="External-Network-1074" type="application/vnd.vmware.vcloud.vAppNetwork+xml"/>\r
+<Link rel="down" href="https://localhost/api/network/379f083b-4057-4724-a128-ed5bc6672591" name="testing_T6nODiW4-68f68d93-0350-4d86-b40b-6e74dedf994d" type="application/vnd.vmware.vcloud.vAppNetwork+xml"/>\r
+<Link rel="down" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
+<Link rel="controlAccess" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/action/controlAccess" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
+<Link rel="up" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69" type="application/vnd.vmware.vcloud.vdc+xml"/>\r
+<Link rel="edit" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069" type="application/vnd.vmware.vcloud.vApp+xml"/>\r
+<Link rel="down" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/owner" type="application/vnd.vmware.vcloud.owner+xml"/>\r
+<Link rel="down" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>\r
+<Link rel="ovf" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/ovf" type="text/xml"/>\r
+<Link rel="down" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/productSections/" type="application/vnd.vmware.vcloud.productSections+xml"/>\r
+<Link rel="snapshot:create" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/action/createSnapshot" type="application/vnd.vmware.vcloud.createSnapshotParams+xml"/>\r
+<LeaseSettingsSection href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/leaseSettingsSection/" type="application/vnd.vmware.vcloud.leaseSettingsSection+xml" ovf:required="false">\r
+<ovf:Info>Lease settings section</ovf:Info>\r
+<Link rel="edit" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/leaseSettingsSection/" type="application/vnd.vmware.vcloud.leaseSettingsSection+xml"/> <DeploymentLeaseInSeconds>0</DeploymentLeaseInSeconds><StorageLeaseInSeconds>7776000</StorageLeaseInSeconds></LeaseSettingsSection>\r
+<ovf:StartupSection xmlns:vcloud="http://www.vmware.com/vcloud/v1.5" vcloud:type="application/vnd.vmware.vcloud.startupSection+xml" vcloud:href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/startupSection/"><ovf:Info>VApp startup section</ovf:Info>\r
+<ovf:Item ovf:id="Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa" ovf:order="0" ovf:startAction="powerOn" ovf:startDelay="0" ovf:stopAction="powerOff" ovf:stopDelay="0"/>\r
+<Link rel="edit" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/startupSection/" type="application/vnd.vmware.vcloud.startupSection+xml"/> </ovf:StartupSection><ovf:NetworkSection xmlns:vcloud="http://www.vmware.com/vcloud/v1.5" vcloud:type="application/vnd.vmware.vcloud.networkSection+xml" vcloud:href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/networkSection/"><ovf:Info>The list of logical networks</ovf:Info>\r
+<ovf:Network ovf:name="External-Network-1074"><ovf:Description>External-Network-1074</ovf:Description></ovf:Network>\r
+<ovf:Network ovf:name="testing_T6nODiW4-68f68d93-0350-4d86-b40b-6e74dedf994d"><ovf:Description/></ovf:Network></ovf:NetworkSection>\r
+<NetworkConfigSection href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/networkConfigSection/" type="application/vnd.vmware.vcloud.networkConfigSection+xml" ovf:required="false"><ovf:Info>The configuration parameters for logical networks</ovf:Info>\r
+<Link rel="edit" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/networkConfigSection/"   type="application/vnd.vmware.vcloud.networkConfigSection+xml"/><NetworkConfig networkName="External-Network-1074"><Link rel="repair" href="https://localhost/api/admin/network/9489a59a-0339-4151-9667-f5b90296c36d/action/reset"/>\r
+<Description>External-Network-1074</Description><Configuration><IpScopes><IpScope><IsInherited>false</IsInherited><Gateway>192.168.254.1</Gateway><Netmask>255.255.255.0</Netmask>\r
+<IsEnabled>true</IsEnabled><IpRanges><IpRange><StartAddress>192.168.254.100</StartAddress><EndAddress>192.168.254.199</EndAddress></IpRange></IpRanges></IpScope></IpScopes>\r
+<FenceMode>isolated</FenceMode><RetainNetInfoAcrossDeployments>false</RetainNetInfoAcrossDeployments></Configuration><IsDeployed>true</IsDeployed></NetworkConfig>\r
+<NetworkConfig networkName="testing_T6nODiW4-68f68d93-0350-4d86-b40b-6e74dedf994d">\r
+<Link rel="repair" href="https://localhost/api/admin/network/379f083b-4057-4724-a128-ed5bc6672591/action/reset"/><Description/><Configuration><IpScopes><IpScope><IsInherited>true</IsInherited>\r
+<Gateway>192.169.241.253</Gateway><Netmask>255.255.255.0</Netmask><Dns1>192.169.241.102</Dns1><DnsSuffix>corp.local</DnsSuffix><IsEnabled>true</IsEnabled><IpRanges><IpRange>\r
+<StartAddress>192.169.241.115</StartAddress><EndAddress>192.169.241.150</EndAddress></IpRange></IpRanges></IpScope></IpScopes>\r
+<ParentNetwork href="https://localhost/api/admin/network/d4307ff7-0e34-4d41-aab0-4c231a045088" id="d4307ff7-0e34-4d41-aab0-4c231a045088" name="testing_T6nODiW4-68f68d93-0350-4d86-b40b-6e74dedf994d"/><FenceMode>bridged</FenceMode><RetainNetInfoAcrossDeployments>false</RetainNetInfoAcrossDeployments></Configuration>\r
+<IsDeployed>true</IsDeployed></NetworkConfig></NetworkConfigSection><SnapshotSection href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/snapshotSection" type="application/vnd.vmware.vcloud.snapshotSection+xml" ovf:required="false"><ovf:Info>Snapshot information section</ovf:Info></SnapshotSection><DateCreated>2017-09-21T01:15:31.627-07:00</DateCreated><Owner type="application/vnd.vmware.vcloud.owner+xml">\r
+<User href="https://localhost/api/admin/user/f7b6beba-96db-4674-b187-675ed1873c8c" name="orgadmin" type="application/vnd.vmware.admin.user+xml"/>\r
+</Owner><InMaintenanceMode>false</InMaintenanceMode><Children>\r
+<Vm needsCustomization="false" nestedHypervisorEnabled="false" deployed="true" status="4" name="Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa" id="urn:vcloud:vm:47d12505-5968-4e16-95a7-18743edb0c8b" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b" type="application/vnd.vmware.vcloud.vm+xml">\r
+<Link rel="power:powerOff" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/power/action/powerOff"/>\r
+<Link rel="power:reboot" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/power/action/reboot"/>\r
+<Link rel="power:reset" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/power/action/reset"/>\r
+<Link rel="power:shutdown" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/power/action/shutdown"/>\r
+<Link rel="power:suspend" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/power/action/suspend"/>\r
+<Link rel="undeploy" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/action/undeploy" type="application/vnd.vmware.vcloud.undeployVAppParams+xml"/>\r
+<Link rel="edit" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b" type="application/vnd.vmware.vcloud.vm+xml"/>\r
+<Link rel="down" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>\r
+<Link rel="down" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/productSections/" type="application/vnd.vmware.vcloud.productSections+xml"/>\r
+<Link rel="down" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/metrics/current" type="application/vnd.vmware.vcloud.metrics.currentUsageSpec+xml"/>\r
+<Link rel="down" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/metrics/historic" type="application/vnd.vmware.vcloud.metrics.historicUsageSpec+xml"/>\r
+<Link rel="metrics" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/metrics/current" type="application/vnd.vmware.vcloud.metrics.currentUsageSpec+xml"/>\r
+<Link rel="metrics" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/metrics/historic" type="application/vnd.vmware.vcloud.metrics.historicUsageSpec+xml"/>\r
+<Link rel="screen:thumbnail" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/screen"/>\r
+<Link rel="screen:acquireTicket" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/screen/action/acquireTicket"/>\r
+<Link rel="screen:acquireMksTicket" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/screen/action/acquireMksTicket" type="application/vnd.vmware.vcloud.mksTicket+xml"/>\r
+<Link rel="media:insertMedia" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/media/action/insertMedia" type="application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml"/>\r
+<Link rel="media:ejectMedia" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/media/action/ejectMedia" type="application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml"/>\r
+<Link rel="disk:attach" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/disk/action/attach" type="application/vnd.vmware.vcloud.diskAttachOrDetachParams+xml"/>\r
+<Link rel="disk:detach" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/disk/action/detach" type="application/vnd.vmware.vcloud.diskAttachOrDetachParams+xml"/>\r
+<Link rel="installVmwareTools" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/action/installVMwareTools"/>\r
+<Link rel="customizeAtNextPowerOn" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/action/customizeAtNextPowerOn"/>\r
+<Link rel="snapshot:create" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/action/createSnapshot" type="application/vnd.vmware.vcloud.createSnapshotParams+xml"/>\r
+<Link rel="reconfigureVm" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/action/reconfigureVm" name="Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa" type="application/vnd.vmware.vcloud.vm+xml"/>\r
+<Link rel="up" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069" type="application/vnd.vmware.vcloud.vApp+xml"/><Description>Ubuntu-vm</Description>  <ovf:VirtualHardwareSection xmlns:vcloud="http://www.vmware.com/vcloud/v1.5" ovf:transport="" vcloud:type="application/vnd.vmware.vcloud.virtualHardwareSection+xml" vcloud:href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/"><ovf:Info>Virtual hardware requirements</ovf:Info><ovf:System><vssd:ElementName>Virtual Hardware Family</vssd:ElementName><vssd:InstanceID>0</vssd:InstanceID>    <vssd:VirtualSystemIdentifier>Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa</vssd:VirtualSystemIdentifier><vssd:VirtualSystemType>vmx-11</vssd:VirtualSystemType></ovf:System><ovf:Item>    <rasd:Address>00:50:56:01:12:a2</rasd:Address><rasd:AddressOnParent>0</rasd:AddressOnParent>    <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>    <rasd:Connection vcloud:ipAddressingMode="DHCP" vcloud:ipAddress="12.19.21.20" vcloud:primaryNetworkConnection="true">testing_T6nODiW4-68f68d93-0350-4d86-b40b-6e74dedf994d</rasd:Connection>    <rasd:Description>Vmxnet3 ethernet adapter on "testing_T6nODiW4-68f68d93-0350-4d86-b40b-6e74dedf994d"</rasd:Description>    <rasd:ElementName>Network adapter 0</rasd:ElementName>    <rasd:InstanceID>1</rasd:InstanceID>    <rasd:ResourceSubType>VMXNET3</rasd:ResourceSubType>    <rasd:ResourceType>10</rasd:ResourceType></ovf:Item><ovf:Item>    <rasd:Address>0</rasd:Address>    <rasd:Description>SCSI Controller</rasd:Description>    <rasd:ElementName>SCSI Controller 0</rasd:ElementName>    <rasd:InstanceID>2</rasd:InstanceID>    <rasd:ResourceSubType>lsilogic</rasd:ResourceSubType>    <rasd:ResourceType>6</rasd:ResourceType></ovf:Item><ovf:Item>    <rasd:AddressOnParent>0</rasd:AddressOnParent>    <rasd:Description>Hard disk</rasd:Description>    <rasd:ElementName>Hard disk 1</rasd:ElementName>    <rasd:HostResource 
vcloud:storageProfileHref="https://localhost/api/vdcStorageProfile/950701fb-2b8a-4808-80f1-27d1170a2bfc" vcloud:busType="6" vcloud:busSubType="lsilogic" vcloud:capacity="40960" vcloud:storageProfileOverrideVmDefault="false"/>    <rasd:InstanceID>2000</rasd:InstanceID>    <rasd:Parent>2</rasd:Parent>    <rasd:ResourceType>17</rasd:ResourceType>    <rasd:VirtualQuantity>42949672960</rasd:VirtualQuantity>    <rasd:VirtualQuantityUnits>byte</rasd:VirtualQuantityUnits></ovf:Item><ovf:Item>    <rasd:Address>0</rasd:Address>    <rasd:Description>SATA Controller</rasd:Description>    <rasd:ElementName>SATA Controller 0</rasd:ElementName>    <rasd:InstanceID>3</rasd:InstanceID>    <rasd:ResourceSubType>vmware.sata.ahci</rasd:ResourceSubType>    <rasd:ResourceType>20</rasd:ResourceType></ovf:Item><ovf:Item>    <rasd:AddressOnParent>0</rasd:AddressOnParent>    <rasd:AutomaticAllocation>false</rasd:AutomaticAllocation>    <rasd:Description>CD/DVD Drive</rasd:Description>    <rasd:ElementName>CD/DVD Drive 1</rasd:ElementName>    <rasd:HostResource/>    <rasd:InstanceID>16000</rasd:InstanceID>    <rasd:Parent>3</rasd:Parent>    <rasd:ResourceType>15</rasd:ResourceType></ovf:Item><ovf:Item>    <rasd:AddressOnParent>0</rasd:AddressOnParent>    <rasd:AutomaticAllocation>false</rasd:AutomaticAllocation>    <rasd:Description>Floppy Drive</rasd:Description>    <rasd:ElementName>Floppy Drive 1</rasd:ElementName>    <rasd:HostResource/>    <rasd:InstanceID>8000</rasd:InstanceID>    <rasd:ResourceType>14</rasd:ResourceType></ovf:Item><ovf:Item vcloud:type="application/vnd.vmware.vcloud.rasdItem+xml" vcloud:href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/cpu">    <rasd:AllocationUnits>hertz * 10^6</rasd:AllocationUnits>    <rasd:Description>Number of Virtual CPUs</rasd:Description>    <rasd:ElementName>1 virtual CPU(s)</rasd:ElementName>    <rasd:InstanceID>4</rasd:InstanceID>    <rasd:Reservation>0</rasd:Reservation>    
<rasd:ResourceType>3</rasd:ResourceType>    <rasd:VirtualQuantity>1</rasd:VirtualQuantity>    <rasd:Weight>0</rasd:Weight>    <vmw:CoresPerSocket ovf:required="false">1</vmw:CoresPerSocket>    <Link rel="edit" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/cpu" type="application/vnd.vmware.vcloud.rasdItem+xml"/></ovf:Item><ovf:Item vcloud:type="application/vnd.vmware.vcloud.rasdItem+xml" vcloud:href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/memory">    <rasd:AllocationUnits>byte * 2^20</rasd:AllocationUnits>    <rasd:Description>Memory Size</rasd:Description>    <rasd:ElementName>1024 MB of memory</rasd:ElementName>    <rasd:InstanceID>5</rasd:InstanceID>    <rasd:Reservation>0</rasd:Reservation>    <rasd:ResourceType>4</rasd:ResourceType>    <rasd:VirtualQuantity>1024</rasd:VirtualQuantity>    <rasd:Weight>0</rasd:Weight>    <Link rel="edit" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/memory" type="application/vnd.vmware.vcloud.rasdItem+xml"/></ovf:Item><Link rel="edit" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/" type="application/vnd.vmware.vcloud.virtualHardwareSection+xml"/><Link rel="down" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/cpu" type="application/vnd.vmware.vcloud.rasdItem+xml"/>\r
+<Link rel="edit" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/cpu" type="application/vnd.vmware.vcloud.rasdItem+xml"/>\r
+<Link rel="down" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/memory" type="application/vnd.vmware.vcloud.rasdItem+xml"/>\r
+<Link rel="edit" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/memory" type="application/vnd.vmware.vcloud.rasdItem+xml"/>\r
+<Link rel="down" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/disks" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/>\r
+<Link rel="edit" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/disks" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/><Link rel="down" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/media" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/><Link rel="down" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/networkCards" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/><Link rel="edit" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/networkCards" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/><Link rel="down" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/serialPorts" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/><Link rel="edit" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/serialPorts" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/></ovf:VirtualHardwareSection><ovf:OperatingSystemSection xmlns:vcloud="http://www.vmware.com/vcloud/v1.5" ovf:id="94" vcloud:type="application/vnd.vmware.vcloud.operatingSystemSection+xml" vmw:osType="ubuntu64Guest" vcloud:href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/operatingSystemSection/"><ovf:Info>Specifies the operating system installed</ovf:Info><ovf:Description>Ubuntu Linux (64-bit)</ovf:Description><Link rel="edit" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/operatingSystemSection/" type="application/vnd.vmware.vcloud.operatingSystemSection+xml"/></ovf:OperatingSystemSection><NetworkConnectionSection href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/networkConnectionSection/" type="application/vnd.vmware.vcloud.networkConnectionSection+xml" ovf:required="false"><ovf:Info>Specifies the available VM network 
connections</ovf:Info><PrimaryNetworkConnectionIndex>0</PrimaryNetworkConnectionIndex><NetworkConnection needsCustomization="false" network="testing_T6nODiW4-68f68d93-0350-4d86-b40b-6e74dedf994d">    <NetworkConnectionIndex>0</NetworkConnectionIndex>    <IpAddress>12.19.21.20</IpAddress>    <IsConnected>true</IsConnected>    <MACAddress>00:50:56:01:12:a2</MACAddress>    <IpAddressAllocationMode>DHCP</IpAddressAllocationMode></NetworkConnection><Link rel="edit" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/networkConnectionSection/" type="application/vnd.vmware.vcloud.networkConnectionSection+xml"/></NetworkConnectionSection><GuestCustomizationSection href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/guestCustomizationSection/" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml" ovf:required="false"><ovf:Info>Specifies Guest OS Customization Settings</ovf:Info><Enabled>true</Enabled><ChangeSid>false</ChangeSid><VirtualMachineId>47d12505-5968-4e16-95a7-18743edb0c8b</VirtualMachineId><JoinDomainEnabled>false</JoinDomainEnabled><UseOrgSettings>false</UseOrgSettings><AdminPasswordEnabled>false</AdminPasswordEnabled><AdminPasswordAuto>true</AdminPasswordAuto><AdminAutoLogonEnabled>false</AdminAutoLogonEnabled><AdminAutoLogonCount>0</AdminAutoLogonCount><ResetPasswordRequired>false</ResetPasswordRequired><ComputerName>Ubuntu-vm-001</ComputerName><Link rel="edit" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/guestCustomizationSection/" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml"/></GuestCustomizationSection><RuntimeInfoSection xmlns:vcloud="http://www.vmware.com/vcloud/v1.5" vcloud:type="application/vnd.vmware.vcloud.virtualHardwareSection+xml" vcloud:href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/runtimeInfoSection"><ovf:Info>Specifies Runtime info</ovf:Info><VMWareTools version="2147483647"/></RuntimeInfoSection><SnapshotSection 
href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/snapshotSection" type="application/vnd.vmware.vcloud.snapshotSection+xml" ovf:required="false"><ovf:Info>Snapshot information section</ovf:Info></SnapshotSection><DateCreated>2017-09-21T01:15:53.863-07:00</DateCreated><VAppScopedLocalId>Ubuntu-vm</VAppScopedLocalId><ovfenv:Environment xmlns:ns11="http://www.vmware.com/schema/ovfenv" ovfenv:id="" ns11:vCenterId="vm-7833"><ovfenv:PlatformSection>    <ovfenv:Kind>VMware ESXi</ovfenv:Kind>    <ovfenv:Version>6.0.0</ovfenv:Version>    <ovfenv:Vendor>VMware, Inc.</ovfenv:Vendor>    <ovfenv:Locale>en</ovfenv:Locale></ovfenv:PlatformSection><ovfenv:PropertySection>    <ovfenv:Property ovfenv:key="vCloud_UseSysPrep" ovfenv:value="None"/>    <ovfenv:Property ovfenv:key="vCloud_bitMask" ovfenv:value="1"/>    <ovfenv:Property ovfenv:key="vCloud_bootproto_0" ovfenv:value="dhcp"/>    <ovfenv:Property ovfenv:key="vCloud_computerName" ovfenv:value="Ubuntu-vm-001"/>    <ovfenv:Property ovfenv:key="vCloud_macaddr_0" ovfenv:value="00:50:56:01:12:a2"/>    <ovfenv:Property ovfenv:key="vCloud_markerid" ovfenv:value="c743cbe8-136e-4cf8-9e42-b291646b8058"/>    <ovfenv:Property ovfenv:key="vCloud_numnics" ovfenv:value="1"/>    <ovfenv:Property ovfenv:key="vCloud_primaryNic" ovfenv:value="0"/>    <ovfenv:Property ovfenv:key="vCloud_reconfigToken" ovfenv:value="246124151"/>    <ovfenv:Property ovfenv:key="vCloud_resetPassword" ovfenv:value="0"/></ovfenv:PropertySection><ve:EthernetAdapterSection xmlns:ve="http://www.vmware.com/schema/ovfenv" xmlns="http://schemas.dmtf.org/ovf/environment/1" xmlns:oe="http://schemas.dmtf.org/ovf/environment/1">    <ve:Adapter ve:mac="00:50:56:01:12:a2" ve:network="DPG-MGMT-3151" ve:unitNumber="7"/></ve:EthernetAdapterSection></ovfenv:Environment><VmCapabilities href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/vmCapabilities/" type="application/vnd.vmware.vcloud.vmCapabilitiesSection+xml"><Link rel="edit" 
href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/vmCapabilities/" type="application/vnd.vmware.vcloud.vmCapabilitiesSection+xml"/><MemoryHotAddEnabled>false</MemoryHotAddEnabled><CpuHotAddEnabled>false</CpuHotAddEnabled></VmCapabilities><StorageProfile href="https://localhost/api/vdcStorageProfile/950701fb-2b8a-4808-80f1-27d1170a2bfc" name="*" type="application/vnd.vmware.vcloud.vdcStorageProfile+xml"/></Vm></Children></VApp>"""\r
+\r
+poweroff_task_xml = """<?xml version="1.0" encoding="UTF-8"?>\r
+                <Task xmlns="http://www.vmware.com/vcloud/v1.5" cancelRequested="false" expiryTime="2017-12-22T23:18:23.040-08:00" operation="Powering Off Virtual Application Test1_vm-f370dafc-4aad-4415-bad9-68509dda67c9(f26ebf0a-f675-4622-83a6-64c6401769ac)" operationName="vappPowerOff" serviceNamespace="com.vmware.vcloud" startTime="2017-09-23T23:18:23.040-07:00" status="queued" name="task" id="urn:vcloud:task:26975b6e-310e-4ed9-914e-ba7051eaabcb" href="https://localhost/api/task/26975b6e-310e-4ed9-914e-ba7051eaabcb" type="application/vnd.vmware.vcloud.task+xml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">\r
+               <Owner href="https://localhost/api/vApp/vapp-f26ebf0a-f675-4622-83a6-64c6401769ac" name="Test1_vm-f370dafc-4aad-4415-bad9-68509dda67c9" type="application/vnd.vmware.vcloud.vApp+xml"/>\r
+               <User href="https://localhost/api/admin/user/f7b6beba-96db-4674-b187-675ed1873c8c" name="orgadmin" type="application/vnd.vmware.admin.user+xml"/>\r
+               <Organization href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac" name="Org3" type="application/vnd.vmware.vcloud.org+xml"/>\r
+               <Details/>\r
+               </Task>"""\r
+\r
+org_xml_response = """<Org xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" name="Org3" id="urn:vcloud:org:2cb3dffb-5c51-4355-8406-28553ead28ac" href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac" type="application/vnd.vmware.vcloud.org+xml" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">\r
+<Link rel="down" href="https://localhost/api/vdc/216648ae-1b91-412b-b821-e4c301ff27d2" name="osm" type="application/vnd.vmware.vcloud.vdc+xml"/>\r
+<Link rel="down" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69" name="Org3-VDC-PVDC1" type="application/vnd.vmware.vcloud.vdc+xml"/>\r
+<Link rel="down" href="https://localhost/api/vdc/414fdda9-3556-478c-a496-2deeec39cd30" name="osm1" type="application/vnd.vmware.vcloud.vdc+xml"/>\r
+<Link rel="down" href="https://localhost/api/tasksList/2cb3dffb-5c51-4355-8406-28553ead28ac" type="application/vnd.vmware.vcloud.tasksList+xml"/>\r
+<Link rel="down" href="https://localhost/api/catalog/4b94b67e-c2c6-49ec-b46c-3f35ba45ca4a" name="cirros034" type="application/vnd.vmware.vcloud.catalog+xml"/>\r
+<Link rel="down" href="https://localhost/api/catalog/4b94b67e-c2c6-49ec-b46c-3f35ba45ca4a/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
+<Link rel="controlAccess" href="https://localhost/api/catalog/4b94b67e-c2c6-49ec-b46c-3f35ba45ca4a/action/controlAccess" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
+<Link rel="down" href="https://localhost/api/catalog/34925a30-0f4a-4018-9759-0d6799063b51" name="Ubuntu_1nic" type="application/vnd.vmware.vcloud.catalog+xml"/>\r
+<Link rel="down" href="https://localhost/api/catalog/34925a30-0f4a-4018-9759-0d6799063b51/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
+<Link rel="controlAccess" href="https://localhost/api/catalog/34925a30-0f4a-4018-9759-0d6799063b51/action/controlAccess" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
+<Link rel="down" href="https://localhost/api/catalog/d3fa3df2-b311-4571-9138-4c66541d7f46" name="cirros_10" type="application/vnd.vmware.vcloud.catalog+xml"/>\r
+<Link rel="down" href="https://localhost/api/catalog/d3fa3df2-b311-4571-9138-4c66541d7f46/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
+<Link rel="controlAccess" href="https://localhost/api/catalog/d3fa3df2-b311-4571-9138-4c66541d7f46/action/controlAccess" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
+<Link rel="down" href="https://localhost/api/catalog/d0eb0b02-718d-42e0-b889-56575000b52d" name="Test_Cirros" type="application/vnd.vmware.vcloud.catalog+xml"/>\r
+<Link rel="down" href="https://localhost/api/catalog/d0eb0b02-718d-42e0-b889-56575000b52d/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
+<Link rel="controlAccess" href="https://localhost/api/catalog/d0eb0b02-718d-42e0-b889-56575000b52d/action/controlAccess" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
+<Link rel="down" href="https://localhost/api/catalog/c3b56180-f980-4256-9109-a93168d73ff2" name="de4ffcf2ad21f1a5d0714d6b868e2645" type="application/vnd.vmware.vcloud.catalog+xml"/>\r
+<Link rel="down" href="https://localhost/api/catalog/c3b56180-f980-4256-9109-a93168d73ff2/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
+<Link rel="controlAccess" href="https://localhost/api/catalog/c3b56180-f980-4256-9109-a93168d73ff2/action/controlAccess" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
+<Link rel="down" href="https://localhost/api/catalog/b139ed82-7ca4-49fb-9882-5f841f59c890" name="Ubuntu_plugtest-1" type="application/vnd.vmware.vcloud.catalog+xml"/>\r
+<Link rel="down" href="https://localhost/api/catalog/b139ed82-7ca4-49fb-9882-5f841f59c890/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
+<Link rel="controlAccess" href="https://localhost/api/catalog/b139ed82-7ca4-49fb-9882-5f841f59c890/action/controlAccess" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
+<Link rel="down" href="https://localhost/api/catalog/e8d953db-8dc9-46d5-9cab-329774cd2ad9" name="Ubuntu_no_nic" type="application/vnd.vmware.vcloud.catalog+xml"/>\r
+<Link rel="down" href="https://localhost/api/catalog/e8d953db-8dc9-46d5-9cab-329774cd2ad9/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
+<Link rel="controlAccess" href="https://localhost/api/catalog/e8d953db-8dc9-46d5-9cab-329774cd2ad9/action/controlAccess" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
+<Link rel="down" href="https://localhost/api/catalog/b31e6973-86d2-404b-a522-b16846d099dc" name="Ubuntu_Cat" type="application/vnd.vmware.vcloud.catalog+xml"/>\r
+<Link rel="down" href="https://localhost/api/catalog/b31e6973-86d2-404b-a522-b16846d099dc/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
+<Link rel="down" href="https://localhost/api/catalog/d64b2617-ea4b-4b90-910b-102c99dd2031" name="Ubuntu16" type="application/vnd.vmware.vcloud.catalog+xml"/>\r
+<Link rel="down" href="https://localhost/api/catalog/d64b2617-ea4b-4b90-910b-102c99dd2031/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
+<Link rel="controlAccess" href="https://localhost/api/catalog/d64b2617-ea4b-4b90-910b-102c99dd2031/action/controlAccess" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
+<Link rel="add" href="https://localhost/api/admin/org/2cb3dffb-5c51-4355-8406-28553ead28ac/catalogs" type="application/vnd.vmware.admin.catalog+xml"/>\r
+<Link rel="down" href="https://localhost/api/network/090ffa68-9be6-4d74-af45-9a071544a633" name="default.cirros_ns.cirros_nsd_vld1-73a7d683-af17-49ff-95d3-72f8feb25537" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
+<Link rel="down" href="https://localhost/api/network/17f3a12f-16f8-44a1-99e9-9a0122a7ac41" name="default.ass.management-3979591d-ea4e-4254-b4c4-4052107e4aca" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
+<Link rel="down" href="https://localhost/api/network/326aee79-4f5c-439c-8ead-1bbfa42d2e51" name="default.Testvm11.management-fe46ba91-3b36-4964-9ad2-e91b475b3d23" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
+<Link rel="down" href="https://localhost/api/network/420c24c7-89e9-49e5-ba6d-d21bfb9af94b" name="cirros_nsd_vld1-ea8aec47-0a6c-4fdb-814f-7a743e31407a" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
+<Link rel="down" href="https://localhost/api/network/42cba4fd-7baa-4f53-bda0-b36dada672d0" name="default.cirros_ns.cirros_nsd_vld1-44dff01a-2bdb-4096-a916-7e9826bfa401" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
+<Link rel="down" href="https://localhost/api/network/4ae9fec5-7ed0-4d5e-b0f3-f5289bdf6471" name="default.cirros_ns.cirros_nsd_vld1-9f547589-37b7-4d7d-8890-8d3dd479ff5b" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
+<Link rel="down" href="https://localhost/api/network/4b2ecfa9-6a70-4fe4-9d79-b3f74df91e85" name="default.cirros_ns.cirros_nsd_vld1-43852bce-6109-4949-b63a-deec9d7daab2" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
+<Link rel="down" href="https://localhost/api/network/693f72af-ae42-42e5-956e-25723628bf26" name="default.cirros_ns.cirros_nsd_vld1-8cd70d26-ba81-4a04-aa82-67a994b3e21c" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
+<Link rel="down" href="https://localhost/api/network/6d9fbd4c-f0b9-4033-a13f-a7c8990b01de" name="default.vcd.management-f05b9ad3-7480-4ee6-ab8d-92b1f3c0b265" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
+<Link rel="down" href="https://localhost/api/network/6e3e9f57-cee4-433a-883b-0bbe9760e99d" name="default" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
+<Link rel="down" href="https://localhost/api/network/7787cdd7-9577-4966-ba72-8fbbff5d2553" name="default.cirros_ns.cirros_nsd_vld1-ab1f2288-ff59-488c-af02-c8d5e34e0847" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
+<Link rel="down" href="https://localhost/api/network/7fa723e3-cd6c-4680-9522-e644eb31a188" name="default.cirros_ns.cirros_nsd_vld1-285865bb-736c-4b3d-8618-d755928daf5c" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
+<Link rel="down" href="https://localhost/api/network/9030a222-4562-43a0-abc6-aa60c7c1aae0" name="default.cirros_ns.cirros_nsd_vld1-57248151-de72-4313-a84f-b090d8c3feb8" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
+<Link rel="down" href="https://localhost/api/network/a4bd508c-1325-41b0-8c25-61cb7b83cde7" name="default.cirros_ns.cirros_nsd_vld1-491dfb8d-6b4b-41ab-b3e8-a5148e110bba" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
+<Link rel="down" href="https://localhost/api/network/a719292f-0a7f-4e03-a346-183f23f3e60c" name="default.cirros_ns.cirros_nsd_vld1-7ba57204-eed1-4dc8-8698-60a71bbae715" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
+<Link rel="down" href="https://localhost/api/network/a73574ef-16d4-4357-adbf-a0997eb5eb75" name="default.cirros_ns.cirros_nsd_vld1-4430f367-3fc8-4367-9bf1-96dbc244abe6" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
+<Link rel="down" href="https://localhost/api/network/a94e9ba0-e959-47d6-87c0-70e8cb1b485a" name="default.cirros_ns.cirros_nsd_vld1-c56c51c5-e5a8-44fe-9d36-1f2cbd9a7137" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
+<Link rel="down" href="https://localhost/api/network/ab88a587-ff82-4fa7-8225-c0e3eddbf6e6" name="cirros_nsd_vld1-0ed4b7e9-dd56-4f8b-b92f-829b9de95f66" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
+<Link rel="down" href="https://localhost/api/network/b141d722-c96b-4ac5-90da-3d407d376431" name="cirros_nsd_vld1-ad2ebea3-7a0b-4995-91bb-c16bc6fd4b0e" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
+<Link rel="down" href="https://localhost/api/network/c4d61fd6-4d1e-446c-949f-9eb42e0ccc63" name="default.cirros_ns.cirros_nsd_vld1-021a0669-1833-4a0b-a782-30ceed2cca7a" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
+<Link rel="down" href="https://localhost/api/network/cd466f6f-fdc5-404a-9136-320aaa9e3c16" name="default.cirros_ns.cirros_nsd_vld1-22e6962e-6488-47ad-bfad-41bc599abfcd" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
+<Link rel="down" href="https://localhost/api/network/cde04227-8f87-4956-b1f1-9f1be1241b8b" name="default.cirros_ns.cirros_nsd_vld1-629da038-a216-48c5-9ae2-aa4d5dea057c" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
+<Link rel="down" href="https://localhost/api/network/e9812bec-ded8-423d-9807-354adc5720aa" name="default.cirros_ns.cirros_nsd_vld1-ba7fcc4f-fa76-49b1-8fa0-2b0791141fdd" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
+<Link rel="down" href="https://localhost/api/network/eec8ef17-e379-4e40-a743-4ecec6afe616" name="cirros_nsd_vld1-aa9832d6-7d7a-4ac9-be56-cd171063818b" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
+<Link rel="down" href="https://localhost/api/network/ef16d51c-3a54-4eea-bc15-9aa1e92b140f" name="default.cirros_ns.cirros_nsd_vld1-fe7170ad-0b0a-491d-b585-4de31e758ad7" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
+<Link rel="down" href="https://localhost/api/network/f1554f21-4a7b-40be-9a34-a1b640c13398" name="default.Test21.cirros_nsd_vld1-c8f2b860-6794-4c8e-9a5b-3f107f23bbc4" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
+<Link rel="down" href="https://localhost/api/network/fcbbf40a-6578-4054-b496-f10504b94b21" name="default.cirros_ns.cirros_nsd_vld1-a3021c0f-a0fe-413d-9067-cb9182e1f614" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
+<Link rel="down" href="https://localhost/api/network/1fd6421e-929a-4576-bc19-a0c48aea1969" name="default" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
+<Link rel="down" href="https://localhost/api/network/41b8a539-6927-4ec4-a411-aedae8129c45" name="test001.vld2-name-e34e32fd-6d3f-4d24-9d29-e8dab46e515a" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
+<Link rel="down" href="https://localhost/api/network/437258c7-a221-48cd-b889-d24b2fc15087" name="Mgmt-Network-3151" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
+<Link rel="down" href="https://localhost/api/network/bfd56159-9178-4021-a5d8-9ec050569b0c" name="test001.net_internal_name-34602686-3619-4356-98e9-27f6e13e84ad" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
+<Link rel="down" href="https://localhost/api/network/08a0276e-d0fb-4223-92ae-003857ccd38f" name="pytest-09/20/17-05:26:01-cirros_nsd.cirros_nsd_vld1-d6688412-e82a-4cf7-aa77-400beb70dbbf" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
+<Link rel="down" href="https://localhost/api/network/294f2cba-9a81-49c5-bb73-fdaa6644c6ec" name="pytest-09/20/17-03:47:31-cirros_nsd.cirros_nsd_vld1-bd7e8e04-d075-4851-b550-0cf9737c7c8d" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
+<Link rel="down" href="https://localhost/api/network/d660e25b-8049-4e8f-a4b8-6811465197d7" name="Ns1.mgmt-dee74b34-51a5-4caa-aafe-d0c896e53828" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
+<Link rel="down" href="https://localhost/api/supportedSystemsInfo/" type="application/vnd.vmware.vcloud.supportedSystemsInfo+xml"/>\r
+<Link rel="down" href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>\r
+<Link rel="down" href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac/hybrid" type="application/vnd.vmware.vcloud.hybridOrg+xml"/>\r
+<Link rel="alternate" href="https://localhost/api/admin/org/2cb3dffb-5c51-4355-8406-28553ead28ac" type="application/vnd.vmware.admin.organization+xml"/>\r
+<Link rel="down" href="https://localhost/api/vdcTemplates" type="application/vnd.vmware.admin.vdcTemplates+xml"/>\r
+<Link rel="instantiate" href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac/action/instantiate" type="application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml"/>\r
+<Description/><FullName>Organization 3</FullName>\r
+</Org>\r
+"""\r
+\r
+delete_catalog_xml_response = """<?xml version="1.0" encoding="UTF-8"?>\n<Catalog xmlns="http://www.vmware.com/vcloud/v1.5" name="80d8488f67ba1de98b7f485fba6abbd2" id="urn:vcloud:catalog:f3bf3733-465b-419f-b675-52f91d18edbb" href="https://localhost/api/catalog/f3bf3733-465b-419f-b675-52f91d18edbb" type="application/vnd.vmware.vcloud.catalog+xml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">\r
+<Link rel="up" href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac" type="application/vnd.vmware.vcloud.org+xml"/>\r
+<Link rel="down" href="https://localhost/api/catalog/f3bf3733-465b-419f-b675-52f91d18edbb/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>\r
+<Link rel="add" href="https://localhost/api/catalog/f3bf3733-465b-419f-b675-52f91d18edbb/catalogItems" type="application/vnd.vmware.vcloud.catalogItem+xml"/>\r
+<Link rel="add" href="https://localhost/api/catalog/f3bf3733-465b-419f-b675-52f91d18edbb/action/upload" type="application/vnd.vmware.vcloud.media+xml"/>\r
+<Link rel="add" href="https://localhost/api/catalog/f3bf3733-465b-419f-b675-52f91d18edbb/action/upload" type="application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml"/>\r
+<Link rel="copy" href="https://localhost/api/catalog/f3bf3733-465b-419f-b675-52f91d18edbb/action/copy" type="application/vnd.vmware.vcloud.copyOrMoveCatalogItemParams+xml"/>\r
+<Link rel="move" href="https://localhost/api/catalog/f3bf3733-465b-419f-b675-52f91d18edbb/action/move" type="application/vnd.vmware.vcloud.copyOrMoveCatalogItemParams+xml"/>\r
+<Link rel="add" href="https://localhost/api/catalog/f3bf3733-465b-419f-b675-52f91d18edbb/action/captureVApp" type="application/vnd.vmware.vcloud.captureVAppParams+xml"/>\r
+<Link rel="down" href="https://localhost/api/catalog/f3bf3733-465b-419f-b675-52f91d18edbb/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
+<Link rel="controlAccess" href="https://localhost/api/catalog/f3bf3733-465b-419f-b675-52f91d18edbb/action/controlAccess" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
+<Description>80d8488f67ba1de98b7f485fba6abbd2</Description>\r
+<CatalogItems>\r
+    <CatalogItem href="https://localhost/api/catalogItem/8a984fdd-d2cb-4d58-a739-2ea12560aded" id="8a984fdd-d2cb-4d58-a739-2ea12560aded" name="80d8488f67ba1de98b7f485fba6abbd2" type="application/vnd.vmware.vcloud.catalogItem+xml"/>\r
+</CatalogItems>\r
+<IsPublished>\r
+    false\r
+</IsPublished>\r
+<DateCreated>2017-09-24T02:30:23.623-07:00</DateCreated>\r
+<VersionNumber>2</VersionNumber>\r
+</Catalog>"""\r
+\r
+delete_catalog_item_xml_response = """<?xml version="1.0" encoding="UTF-8"?>\r
+<CatalogItem xmlns="http://www.vmware.com/vcloud/v1.5" size="0" name="80d8488f67ba1de98b7f485fba6abbd2" id="urn:vcloud:catalogitem:8a984fdd-d2cb-4d58-a739-2ea12560aded" href="https://localhost/api/catalogItem/8a984fdd-d2cb-4d58-a739-2ea12560aded" type="application/vnd.vmware.vcloud.catalogItem+xml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">\r
+<Link rel="up" href="https://localhost/api/catalog/f3bf3733-465b-419f-b675-52f91d18edbb" type="application/vnd.vmware.vcloud.catalog+xml"/>\r
+<Link rel="down" href="https://localhost/api/catalogItem/8a984fdd-d2cb-4d58-a739-2ea12560aded/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>\r
+<Link rel="edit" href="https://localhost/api/catalogItem/8a984fdd-d2cb-4d58-a739-2ea12560aded" type="application/vnd.vmware.vcloud.catalogItem+xml"/>\r
+<Link rel="remove" href="https://localhost/api/catalogItem/8a984fdd-d2cb-4d58-a739-2ea12560aded"/>    <Description>medial_file_name vApp Template</Description>\r
+<Entity href="https://localhost/api/vAppTemplate/vappTemplate-2731194b-637a-45f5-8e6d-dc65690302f7" name="80d8488f67ba1de98b7f485fba6abbd2" type="application/vnd.vmware.vcloud.vAppTemplate+xml"/>   <DateCreated>2017-09-24T02:30:26.380-07:00</DateCreated>\r
+<VersionNumber>1</VersionNumber>\r
+</CatalogItem>"""\r
+\r
+undeploy_task_xml = """<Task xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" cancelRequested="false" expiryTime="2018-07-17T23:53:10.781-07:00" operation="Stopping Virtual Application Test1_vm-5e6dbb30-41ea-4290-951d-6ce2a7412d46(86d9dd50-9c07-4fc5-84ce-aefb5de7b8ed)" operationName="vappUndeployPowerOff" serviceNamespace="com.vmware.vcloud" startTime="2018-04-18T23:53:10.781-07:00" status="queued" name="task" id="urn:vcloud:task:5ca0a79f-c025-47b9-9f20-b6a04fd67ea3" href="https://localhost/api/task/5ca0a79f-c025-47b9-9f20-b6a04fd67ea3" type="application/vnd.vmware.vcloud.task+xml" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">\r
+<Owner href="https://localhost/api/vApp/vapp-86d9dd50-9c07-4fc5-84ce-aefb5de7b8ed" name="Test1_vm-5e6dbb30-41ea-4290-951d-6ce2a7412d46" type="application/vnd.vmware.vcloud.vApp+xml"/>\r
+<User href="https://localhost/api/admin/user/f7b6beba-96db-4674-b187-675ed1873c8c" name="orgadmin" type="application/vnd.vmware.admin.user+xml"/>\r
+<Organization href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac" name="Org3" type="application/vnd.vmware.vcloud.org+xml"/>\r
+<Details/>\r
+</Task>\r
+"""\r
+\r
+delete_task_xml = """<Task xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" cancelRequested="false" expiryTime="2018-07-17T23:54:11.696-07:00" operation="Deleting Virtual Application Test1_vm-5e6dbb30-41ea-4290-951d-6ce2a7412d46(86d9dd50-9c07-4fc5-84ce-aefb5de7b8ed)" operationName="vdcDeleteVapp" serviceNamespace="com.vmware.vcloud" startTime="2018-04-18T23:54:11.696-07:00" status="queued" name="task" id="urn:vcloud:task:f0399f4e-ddd5-4050-959f-5970ba0a63e6" href="https://localhost/api/task/f0399f4e-ddd5-4050-959f-5970ba0a63e6" type="application/vnd.vmware.vcloud.task+xml" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">\r
+<Owner href="https://localhost/api/vApp/vapp-86d9dd50-9c07-4fc5-84ce-aefb5de7b8ed" name="Test1_vm-5e6dbb30-41ea-4290-951d-6ce2a7412d46" type="application/vnd.vmware.vcloud.vApp+xml"/>\r
+<User href="https://localhost/api/admin/user/f7b6beba-96db-4674-b187-675ed1873c8c" name="orgadmin" type="application/vnd.vmware.admin.user+xml"/>\r
+<Organization href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac" name="Org3" type="application/vnd.vmware.vcloud.org+xml"/>\r
+<Details/>\r
+</Task>"""\r
+\r
+status_task_xml = """<Task xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" cancelRequested="false" endTime="2018-04-19T01:24:46.643-07:00" expiryTime="2018-07-18T01:24:39.363-07:00" operation="Powered Off Virtual Application Test1_vm-fa13aee3-fb79-456f-8ce9-17f029ec4324(e9765c7a-b0de-4663-9db9-028bf0031f4d)" operationName="vappPowerOff" serviceNamespace="com.vmware.vcloud" startTime="2018-04-19T01:24:39.363-07:00" status="success" name="task" id="urn:vcloud:task:17ebe394-b419-4612-ab55-cad3000d780a" href="https://localhost/api/task/17ebe394-b419-4612-ab55-cad3000d780a" type="application/vnd.vmware.vcloud.task+xml" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">\r
+<Owner href="https://localhost/api/vApp/vapp-e9765c7a-b0de-4663-9db9-028bf0031f4d" name="Test1_vm-fa13aee3-fb79-456f-8ce9-17f029ec4324" type="application/vnd.vmware.vcloud.vApp+xml"/>\r
+<User href="https://localhost/api/admin/user/f7b6beba-96db-4674-b187-675ed1873c8c" name="orgadmin" type="application/vnd.vmware.admin.user+xml"/>\r
+<Organization href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac" name="Org3" type="application/vnd.vmware.vcloud.org+xml"/>\r
+<Details/>\r
+</Task>\r
+"""\r
+\r
+vm_xml_response = """<?xml version="1.0" encoding="UTF-8"?>\r
+<Vm xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1" xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" xmlns:vmw="http://www.vmware.com/schema/ovf" xmlns:ovfenv="http://schemas.dmtf.org/ovf/environment/1" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" needsCustomization="false" nestedHypervisorEnabled="false" deployed="true" status="4" name="Ubuntu_no_nic" id="urn:vcloud:vm:53a529b2-10d8-4d56-a7ad-8182acdbe71c" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c" type="application/vnd.vmware.vcloud.vm+xml" xsi:schemaLocation="http://schemas.dmtf.org/ovf/envelope/1 http://schemas.dmtf.org/ovf/envelope/1/dsp8023_1.1.0.xsd http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd http://www.vmware.com/schema/ovf http://www.vmware.com/schema/ovf http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2.22.0/CIM_ResourceAllocationSettingData.xsd http://schemas.dmtf.org/ovf/environment/1 http://schemas.dmtf.org/ovf/envelope/1/dsp8027_1.1.0.xsd http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2.22.0/CIM_VirtualSystemSettingData.xsd">\r
+<Link rel="power:powerOff" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/power/action/powerOff"/>\r
+<Link rel="power:reboot" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/power/action/reboot"/>    <Link rel="power:reset" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/power/action/reset"/>\r
+<Link rel="power:shutdown" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/power/action/shutdown"/>\r
+<Link rel="power:suspend" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/power/action/suspend"/> <Link rel="undeploy" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/action/undeploy" type="application/vnd.vmware.vcloud.undeployVAppParams+xml"/>\r
+<Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c" type="application/vnd.vmware.vcloud.vm+xml"/>\r
+<Link rel="down" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>\r
+<Link rel="down" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/productSections/" type="application/vnd.vmware.vcloud.productSections+xml"/>\r
+<Link rel="down" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/metrics/current" type="application/vnd.vmware.vcloud.metrics.currentUsageSpec+xml"/>\r
+<Link rel="down" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/metrics/historic" type="application/vnd.vmware.vcloud.metrics.historicUsageSpec+xml"/>\r
+<Link rel="metrics" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/metrics/current" type="application/vnd.vmware.vcloud.metrics.currentUsageSpec+xml"/>\r
+<Link rel="metrics" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/metrics/historic" type="application/vnd.vmware.vcloud.metrics.historicUsageSpec+xml"/>\r
+<Link rel="screen:thumbnail" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/screen"/>\r
+<Link rel="screen:acquireTicket" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/screen/action/acquireTicket"/>\r
+<Link rel="screen:acquireMksTicket" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/screen/action/acquireMksTicket" type="application/vnd.vmware.vcloud.mksTicket+xml"/>\r
+<Link rel="media:insertMedia" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/media/action/insertMedia" type="application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml"/>\r
+<Link rel="media:ejectMedia" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/media/action/ejectMedia" type="application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml"/>\r
+<Link rel="disk:attach" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/disk/action/attach" type="application/vnd.vmware.vcloud.diskAttachOrDetachParams+xml"/>\r
+<Link rel="disk:detach" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/disk/action/detach" type="application/vnd.vmware.vcloud.diskAttachOrDetachParams+xml"/>\r
+<Link rel="installVmwareTools" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/action/installVMwareTools"/>\r
+<Link rel="customizeAtNextPowerOn" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/action/customizeAtNextPowerOn"/>\r
+<Link rel="snapshot:create" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/action/createSnapshot" type="application/vnd.vmware.vcloud.createSnapshotParams+xml"/>\r
+<Link rel="reconfigureVm" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/action/reconfigureVm" name="Ubuntu_no_nic" type="application/vnd.vmware.vcloud.vm+xml"/>\r
+<Link rel="up" href="https://localhost/api/vApp/vapp-5a5ca3da-3826-4fe4-83c5-c018ad1765fa" type="application/vnd.vmware.vcloud.vApp+xml"/>\r
+<Description/>\r
+<ovf:VirtualHardwareSection xmlns:vcloud="http://www.vmware.com/vcloud/v1.5" ovf:transport="" vcloud:type="application/vnd.vmware.vcloud.virtualHardwareSection+xml" vcloud:href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/">\r
+<ovf:Info>Virtual hardware requirements</ovf:Info>\r
+<ovf:System>\r
+<vssd:ElementName>Virtual Hardware Family</vssd:ElementName>\r
+<vssd:InstanceID>0</vssd:InstanceID>\r
+<vssd:VirtualSystemIdentifier>Ubuntu_no_nic</vssd:VirtualSystemIdentifier>\r
+<vssd:VirtualSystemType>vmx-11</vssd:VirtualSystemType>\r
+</ovf:System><ovf:Item>\r
+<rasd:Address>00:50:56:01:14:1a</rasd:Address>\r
+<rasd:AddressOnParent>0</rasd:AddressOnParent>\r
+<rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>\r
+<rasd:Connection vcloud:ipAddressingMode="DHCP" vcloud:ipAddress="172.16.27.72" vcloud:primaryNetworkConnection="true">testing_6SNBKa9pz62P-63e13553-ebf9-4518-a33d-6ea922a6d2ce</rasd:Connection>\r
+<rasd:Description>Vmxnet3 ethernet adapter on "testing_6SNBKa9pz62P-63e13553-ebf9-4518-a33d-6ea922a6d2ce"\r
+</rasd:Description><rasd:ElementName>Network adapter 0</rasd:ElementName>\r
+<rasd:InstanceID>1</rasd:InstanceID>\r
+<rasd:ResourceSubType>VMXNET3</rasd:ResourceSubType>\r
+<rasd:ResourceType>10</rasd:ResourceType></ovf:Item><ovf:Item>\r
+<rasd:Address>0</rasd:Address><rasd:Description>SCSI Controller</rasd:Description>\r
+<rasd:ElementName>SCSI Controller 0</rasd:ElementName>\r
+<rasd:InstanceID>2</rasd:InstanceID>\r
+<rasd:ResourceSubType>lsilogic</rasd:ResourceSubType>\r
+<rasd:ResourceType>6</rasd:ResourceType></ovf:Item><ovf:Item>\r
+<rasd:AddressOnParent>0</rasd:AddressOnParent>\r
+<rasd:Description>Hard disk</rasd:Description>\r
+<rasd:ElementName>Hard disk 1</rasd:ElementName>\r
+<rasd:HostResource vcloud:storageProfileHref="https://localhost/api/vdcStorageProfile/950701fb-2b8a-4808-80f1-27d1170a2bfc" vcloud:busType="6" vcloud:busSubType="lsilogic" vcloud:capacity="10240" vcloud:storageProfileOverrideVmDefault="false"/>      <rasd:InstanceID>2000</rasd:InstanceID>\r
+<rasd:Parent>2</rasd:Parent><rasd:ResourceType>17</rasd:ResourceType>\r
+<rasd:VirtualQuantity>10737418240</rasd:VirtualQuantity>\r
+<rasd:VirtualQuantityUnits>byte</rasd:VirtualQuantityUnits>\r
+</ovf:Item><ovf:Item><rasd:Address>1</rasd:Address>\r
+<rasd:Description>IDE Controller</rasd:Description>\r
+<rasd:ElementName>IDE Controller 1</rasd:ElementName>\r
+<rasd:InstanceID>3</rasd:InstanceID>\r
+<rasd:ResourceType>5</rasd:ResourceType>\r
+</ovf:Item><ovf:Item><rasd:AddressOnParent>0</rasd:AddressOnParent>\r
+<rasd:AutomaticAllocation>false</rasd:AutomaticAllocation>\r
+<rasd:Description>CD/DVD Drive</rasd:Description>\r
+<rasd:ElementName>CD/DVD Drive 1</rasd:ElementName><rasd:HostResource/>\r
+<rasd:InstanceID>3002</rasd:InstanceID>\r
+<rasd:Parent>3</rasd:Parent>\r
+<rasd:ResourceType>15</rasd:ResourceType></ovf:Item><ovf:Item>\r
+<rasd:AddressOnParent>0</rasd:AddressOnParent>\r
+<rasd:AutomaticAllocation>false</rasd:AutomaticAllocation>\r
+<rasd:Description>Floppy Drive</rasd:Description>\r
+<rasd:ElementName>Floppy Drive 1</rasd:ElementName>\r
+<rasd:HostResource/><rasd:InstanceID>8000</rasd:InstanceID>\r
+<rasd:ResourceType>14</rasd:ResourceType>\r
+</ovf:Item>\r
+<ovf:Item vcloud:type="application/vnd.vmware.vcloud.rasdItem+xml" vcloud:href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/cpu">\r
+<rasd:AllocationUnits>hertz * 10^6</rasd:AllocationUnits>\r
+<rasd:Description>Number of Virtual CPUs</rasd:Description>\r
+<rasd:ElementName>1 virtual CPU(s)</rasd:ElementName>\r
+<rasd:InstanceID>4</rasd:InstanceID>\r
+<rasd:Reservation>0</rasd:Reservation>\r
+<rasd:ResourceType>3</rasd:ResourceType>\r
+<rasd:VirtualQuantity>1</rasd:VirtualQuantity>\r
+<rasd:Weight>0</rasd:Weight>\r
+<vmw:CoresPerSocket ovf:required="false">1</vmw:CoresPerSocket>\r
+<Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/cpu" type="application/vnd.vmware.vcloud.rasdItem+xml"/></ovf:Item>\r
+<ovf:Item vcloud:type="application/vnd.vmware.vcloud.rasdItem+xml" vcloud:href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/memory">\r
+<rasd:AllocationUnits>byte * 2^20</rasd:AllocationUnits>\r
+<rasd:Description>Memory Size</rasd:Description>\r
+<rasd:ElementName>1024 MB of memory</rasd:ElementName>\r
+<rasd:InstanceID>5</rasd:InstanceID>\r
+<rasd:Reservation>0</rasd:Reservation>\r
+<rasd:ResourceType>4</rasd:ResourceType>\r
+<rasd:VirtualQuantity>1024</rasd:VirtualQuantity>\r
+<rasd:Weight>0</rasd:Weight>\r
+<Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/memory" type="application/vnd.vmware.vcloud.rasdItem+xml"/>\r
+        </ovf:Item>\r
+        <Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/" type="application/vnd.vmware.vcloud.virtualHardwareSection+xml"/>\r
+        <Link rel="down" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/cpu" type="application/vnd.vmware.vcloud.rasdItem+xml"/>\r
+        <Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/cpu" type="application/vnd.vmware.vcloud.rasdItem+xml"/>\r
+        <Link rel="down" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/memory" type="application/vnd.vmware.vcloud.rasdItem+xml"/>\r
+        <Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/memory" type="application/vnd.vmware.vcloud.rasdItem+xml"/>\r
+        <Link rel="down" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/disks" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/>\r
+        <Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/disks" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/>\r
+        <Link rel="down" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/media" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/>\r
+        <Link rel="down" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/networkCards" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/>\r
+        <Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/networkCards" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/>\r
+        <Link rel="down" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/serialPorts" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/>\r
+        <Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/serialPorts" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/>\r
+    </ovf:VirtualHardwareSection>\r
+    <ovf:OperatingSystemSection xmlns:vcloud="http://www.vmware.com/vcloud/v1.5" ovf:id="94" vcloud:type="application/vnd.vmware.vcloud.operatingSystemSection+xml" vmw:osType="ubuntu64Guest" vcloud:href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/operatingSystemSection/">\r
+        <ovf:Info>Specifies the operating system installed</ovf:Info>\r
+        <ovf:Description>Ubuntu Linux (64-bit)</ovf:Description>\r
+        <Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/operatingSystemSection/" type="application/vnd.vmware.vcloud.operatingSystemSection+xml"/>\r
+    </ovf:OperatingSystemSection>\r
+    <NetworkConnectionSection href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/networkConnectionSection/" type="application/vnd.vmware.vcloud.networkConnectionSection+xml" ovf:required="false">\r
+        <ovf:Info>Specifies the available VM network connections</ovf:Info>\r
+        <PrimaryNetworkConnectionIndex>0</PrimaryNetworkConnectionIndex>\r
+        <NetworkConnection needsCustomization="false" network="testing_6SNBKa9pz62P-63e13553-ebf9-4518-a33d-6ea922a6d2ce">\r
+            <NetworkConnectionIndex>0</NetworkConnectionIndex>\r
+            <IpAddress>172.16.27.72</IpAddress>\r
+            <IsConnected>true</IsConnected>\r
+            <MACAddress>00:50:56:01:14:1a</MACAddress>\r
+            <IpAddressAllocationMode>DHCP</IpAddressAllocationMode>\r
+            <NetworkAdapterType>VMXNET3</NetworkAdapterType>\r
+        </NetworkConnection>\r
+        <Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/networkConnectionSection/" type="application/vnd.vmware.vcloud.networkConnectionSection+xml"/>\r
+    </NetworkConnectionSection>  \r
+    <NetworkConnectionSection href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/networkConnectionSection/" type="application/vnd.vmware.vcloud.networkConnectionSection+xml" ovf:required="false">\r
+        <ovf:Info>Specifies the available VM network connections</ovf:Info>\r
+        <PrimaryNetworkConnectionIndex>0</PrimaryNetworkConnectionIndex>\r
+        <NetworkConnection needsCustomization="false" network="testing_6SNBKa9pz62P-63e13553-ebf9-4518-a33d-6ea922a6d2ce">\r
+            <NetworkConnectionIndex>0</NetworkConnectionIndex>\r
+            <IpAddress>172.16.27.72</IpAddress>\r
+            <IsConnected>true</IsConnected>\r
+            <MACAddress>00:50:56:01:14:1a</MACAddress>\r
+            <IpAddressAllocationMode>DHCP</IpAddressAllocationMode>\r
+        </NetworkConnection>\r
+        <Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/networkConnectionSection/" type="application/vnd.vmware.vcloud.networkConnectionSection+xml"/>\r
+    </NetworkConnectionSection>\r
+    <GuestCustomizationSection href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/guestCustomizationSection/" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml" ovf:required="false">\r
+        <ovf:Info>Specifies Guest OS Customization Settings</ovf:Info>\r
+        <Enabled>true</Enabled>\r
+        <ChangeSid>false</ChangeSid>\r
+        <VirtualMachineId>53a529b2-10d8-4d56-a7ad-8182acdbe71c</VirtualMachineId>\r
+        <JoinDomainEnabled>false</JoinDomainEnabled>\r
+        <UseOrgSettings>false</UseOrgSettings>\r
+        <AdminPasswordEnabled>false</AdminPasswordEnabled>\r
+        <AdminPasswordAuto>true</AdminPasswordAuto>\r
+        <AdminAutoLogonEnabled>false</AdminAutoLogonEnabled>\r
+        <AdminAutoLogonCount>0</AdminAutoLogonCount>\r
+        <ResetPasswordRequired>false</ResetPasswordRequired>\r
+        <ComputerName>Ubuntunonic-001</ComputerName>\r
+        <Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/guestCustomizationSection/" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml"/>\r
+    </GuestCustomizationSection>\r
+    <RuntimeInfoSection xmlns:vcloud="http://www.vmware.com/vcloud/v1.5" vcloud:type="application/vnd.vmware.vcloud.virtualHardwareSection+xml" vcloud:href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/runtimeInfoSection">\r
+        <ovf:Info>Specifies Runtime info</ovf:Info>\r
+        <VMWareTools version="2147483647"/>\r
+    </RuntimeInfoSection>\r
+    <SnapshotSection href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/snapshotSection" type="application/vnd.vmware.vcloud.snapshotSection+xml" ovf:required="false">\r
+        <ovf:Info>Snapshot information section</ovf:Info>\r
+    </SnapshotSection>\r
+    <DateCreated>2018-04-19T04:19:28.150-07:00</DateCreated>\r
+    <VAppScopedLocalId>Ubuntu_no_nic</VAppScopedLocalId>\r
+    <ovfenv:Environment xmlns:ns11="http://www.vmware.com/schema/ovfenv" ovfenv:id="" ns11:vCenterId="vm-8971">\r
+        <ovfenv:PlatformSection>\r
+<ovfenv:Kind>VMware ESXi</ovfenv:Kind>\r
+<ovfenv:Version>6.0.0</ovfenv:Version>\r
+<ovfenv:Vendor>VMware, Inc.</ovfenv:Vendor>\r
+<ovfenv:Locale>en</ovfenv:Locale>\r
+        </ovfenv:PlatformSection>\r
+        <ovfenv:PropertySection>\r
+<ovfenv:Property ovfenv:key="vCloud_UseSysPrep" ovfenv:value="None"/>\r
+<ovfenv:Property ovfenv:key="vCloud_bitMask" ovfenv:value="1"/>\r
+<ovfenv:Property ovfenv:key="vCloud_bootproto_0" ovfenv:value="dhcp"/>\r
+<ovfenv:Property ovfenv:key="vCloud_computerName" ovfenv:value="Ubuntunonic-001"/>\r
+<ovfenv:Property ovfenv:key="vCloud_macaddr_0" ovfenv:value="00:50:56:01:14:1a"/>\r
+<ovfenv:Property ovfenv:key="vCloud_markerid" ovfenv:value="ec8b90ea-cb5d-43b4-8910-91380ff29d97"/>\r
+<ovfenv:Property ovfenv:key="vCloud_numnics" ovfenv:value="1"/>\r
+<ovfenv:Property ovfenv:key="vCloud_primaryNic" ovfenv:value="0"/>\r
+<ovfenv:Property ovfenv:key="vCloud_reconfigToken" ovfenv:value="132681259"/>\r
+<ovfenv:Property ovfenv:key="vCloud_resetPassword" ovfenv:value="0"/>\r
+        </ovfenv:PropertySection>\r
+        <ve:EthernetAdapterSection xmlns:ve="http://www.vmware.com/schema/ovfenv" xmlns="http://schemas.dmtf.org/ovf/environment/1" xmlns:oe="http://schemas.dmtf.org/ovf/environment/1">\r
+<ve:Adapter ve:mac="00:50:56:01:14:1a" ve:network="DPG-MGMT-3151" ve:unitNumber="7"/>\r
+   \r
+        </ve:EthernetAdapterSection>\r
+    </ovfenv:Environment>\r
+    <VmCapabilities href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/vmCapabilities/" type="application/vnd.vmware.vcloud.vmCapabilitiesSection+xml">\r
+        <Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/vmCapabilities/" type="application/vnd.vmware.vcloud.vmCapabilitiesSection+xml"/>\r
+        <MemoryHotAddEnabled>false</MemoryHotAddEnabled>\r
+        <CpuHotAddEnabled>false</CpuHotAddEnabled>\r
+    </VmCapabilities>\r
+    <StorageProfile href="https://localhost/api/vdcStorageProfile/950701fb-2b8a-4808-80f1-27d1170a2bfc" name="*" type="application/vnd.vmware.vcloud.vdcStorageProfile+xml"/>\r
+</Vm>"""\r
+\r
+delete_tenant = """<?xml version="1.0" encoding="UTF-8"?>\n<Vdc xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5" status="1" name="testing_Cqm5fiZ" id="urn:vcloud:vdc:753227f5-d6c6-4478-9546-acc5cfff21e9" href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9" type="application/vnd.vmware.vcloud.vdc+xml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd http://www.vmware.com/vcloud/extension/v1.5 http://localhost/api/v1.5/schema/vmwextensions.xsd">\n    <VCloudExtension required="false">\n        <vmext:VimObjectRef>\n            <vmext:VimServerRef href="https://localhost/api/admin/extension/vimServer/cc82baf9-9f80-4468-bfe9-ce42b3f9dde5" name="VC" type="application/vnd.vmware.admin.vmwvirtualcenter+xml"/>\n            <vmext:MoRef>resgroup-9025</vmext:MoRef>\n            <vmext:VimObjectType>RESOURCE_POOL</vmext:VimObjectType>\n        </vmext:VimObjectRef>\n    </VCloudExtension>\n    <Link rel="up" href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac" type="application/vnd.vmware.vcloud.org+xml"/>\n    <Link rel="down" href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>\n    <Link rel="edit" href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9" type="application/vnd.vmware.vcloud.vdc+xml"/>\n    <Link rel="remove" href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9"/>\n    <Link rel="add" href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/action/uploadVAppTemplate" type="application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml"/>\n    <Link rel="add" href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/media" type="application/vnd.vmware.vcloud.media+xml"/>\n    <Link rel="add" 
href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/action/instantiateOvf" type="application/vnd.vmware.vcloud.instantiateOvfParams+xml"/>\n    <Link rel="add" href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/action/instantiateVAppTemplate" type="application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml"/>\n    <Link rel="add" href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/action/cloneVApp" type="application/vnd.vmware.vcloud.cloneVAppParams+xml"/>\n    <Link rel="add" href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/action/cloneVAppTemplate" type="application/vnd.vmware.vcloud.cloneVAppTemplateParams+xml"/>\n    <Link rel="add" href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/action/cloneMedia" type="application/vnd.vmware.vcloud.cloneMediaParams+xml"/>\n    <Link rel="add" href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/action/captureVApp" type="application/vnd.vmware.vcloud.captureVAppParams+xml"/>\n    <Link rel="add" href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/action/composeVApp" type="application/vnd.vmware.vcloud.composeVAppParams+xml"/>\n    <Link rel="add" href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/disk" type="application/vnd.vmware.vcloud.diskCreateParams+xml"/>\n    <Link rel="edgeGateways" href="https://localhost/api/admin/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/edgeGateways" type="application/vnd.vmware.vcloud.query.records+xml"/>\n    <Link rel="add" href="https://localhost/api/admin/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/networks" type="application/vnd.vmware.vcloud.orgVdcNetwork+xml"/>\n    <Link rel="orgVdcNetworks" href="https://localhost/api/admin/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/networks" type="application/vnd.vmware.vcloud.query.records+xml"/>\n    <Link rel="alternate" href="https://localhost/api/admin/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9" 
type="application/vnd.vmware.admin.vdc+xml"/>\n    <Description>opnemano</Description>\n    <AllocationModel>AllocationVApp</AllocationModel>\n    <ComputeCapacity>\n        <Cpu>\n            <Units>MHz</Units>\n            <Allocated>0</Allocated>\n            <Limit>2048</Limit>\n            <Reserved>0</Reserved>\n            <Used>0</Used>\n            <Overhead>0</Overhead>\n        </Cpu>\n        <Memory>\n            <Units>MB</Units>\n            <Allocated>0</Allocated>\n            <Limit>2048</Limit>\n            <Reserved>0</Reserved>\n            <Used>0</Used>\n            <Overhead>0</Overhead>\n        </Memory>\n    </ComputeCapacity>\n    <ResourceEntities/>\n    <AvailableNetworks/>\n    <Capabilities>\n        <SupportedHardwareVersions>\n            <SupportedHardwareVersion>vmx-04</SupportedHardwareVersion>\n            <SupportedHardwareVersion>vmx-07</SupportedHardwareVersion>\n            <SupportedHardwareVersion>vmx-08</SupportedHardwareVersion>\n            <SupportedHardwareVersion>vmx-09</SupportedHardwareVersion>\n            <SupportedHardwareVersion>vmx-10</SupportedHardwareVersion>\n            <SupportedHardwareVersion>vmx-11</SupportedHardwareVersion>\n        </SupportedHardwareVersions>\n    </Capabilities>\n    <NicQuota>100</NicQuota>\n    <NetworkQuota>100</NetworkQuota>\n    <UsedNetworkCount>0</UsedNetworkCount>\n    <VmQuota>50</VmQuota>\n    <IsEnabled>true</IsEnabled>\n    <VdcStorageProfiles>\n        <VdcStorageProfile href="https://localhost/api/vdcStorageProfile/37ec8982-e6c3-4fba-a107-0fa36fe292d0" name="NFS Storage Policy" type="application/vnd.vmware.vcloud.vdcStorageProfile+xml"/>\n    </VdcStorageProfiles>\n    <VCpuInMhz2>1000</VCpuInMhz2>\n</Vdc>\n"""\r
+\r
+catalog_list_xml = """<CatalogItem xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" size="0" name="Ubuntu_no_nic" id="urn:vcloud:catalogitem:d79fb542-6ad4-4c09-8cfc-f6104cbf67ad" href="https://localhost/api/catalogItem/d79fb542-6ad4-4c09-8cfc-f6104cbf67ad" type="application/vnd.vmware.vcloud.catalogItem+xml" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd"><Link rel="up" href="https://localhost/api/catalog/e8d953db-8dc9-46d5-9cab-329774cd2ad9" type="application/vnd.vmware.vcloud.catalog+xml"/><Link rel="down" href="https://localhost/api/catalogItem/d79fb542-6ad4-4c09-8cfc-f6104cbf67ad/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/><Link rel="edit" href="https://localhost/api/catalogItem/d79fb542-6ad4-4c09-8cfc-f6104cbf67ad" type="application/vnd.vmware.vcloud.catalogItem+xml"/><Link rel="remove" href="https://localhost/api/catalogItem/d79fb542-6ad4-4c09-8cfc-f6104cbf67ad"/><Description/><Entity href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435" name="Ubuntu_no_nic" type="application/vnd.vmware.vcloud.vAppTemplate+xml"/><DateCreated>2017-10-14T23:52:58.097-07:00</DateCreated><VersionNumber>1</VersionNumber></CatalogItem>"""\r
+\r
+catalogItem_xml = """<?xml version="1.0" encoding="UTF-8"?>\n<CatalogItem xmlns="http://www.vmware.com/vcloud/v1.5" size="0" name="Ubuntu_no_nic" id="urn:vcloud:catalogitem:d79fb542-6ad4-4c09-8cfc-f6104cbf67ad" href="https://localhost/api/catalogItem/d79fb542-6ad4-4c09-8cfc-f6104cbf67ad" type="application/vnd.vmware.vcloud.catalogItem+xml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">\n    <Link rel="up" href="https://localhost/api/catalog/e8d953db-8dc9-46d5-9cab-329774cd2ad9" type="application/vnd.vmware.vcloud.catalog+xml"/>\n    <Link rel="down" href="https://localhost/api/catalogItem/d79fb542-6ad4-4c09-8cfc-f6104cbf67ad/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>\n    <Link rel="edit" href="https://localhost/api/catalogItem/d79fb542-6ad4-4c09-8cfc-f6104cbf67ad" type="application/vnd.vmware.vcloud.catalogItem+xml"/>\n    <Link rel="remove" href="https://localhost/api/catalogItem/d79fb542-6ad4-4c09-8cfc-f6104cbf67ad"/>\n    <Description/>\n    <Entity href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435" name="Ubuntu_no_nic" type="application/vnd.vmware.vcloud.vAppTemplate+xml"/>\n    <DateCreated>2017-10-14T23:52:58.097-07:00</DateCreated>\n    <VersionNumber>1</VersionNumber>\n</CatalogItem>"""\r
+\r
+vapp_template_xml = """<?xml version="1.0" encoding="UTF-8"?>\n<VAppTemplate xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1" xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" xmlns:vmw="http://www.vmware.com/schema/ovf" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" goldMaster="false" ovfDescriptorUploaded="true" status="8" name="Ubuntu_no_nic" id="urn:vcloud:vapptemplate:593e3130-ac0b-44f1-8289-14329dcc5435" href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435" type="application/vnd.vmware.vcloud.vAppTemplate+xml" xsi:schemaLocation="http://schemas.dmtf.org/ovf/envelope/1 http://schemas.dmtf.org/ovf/envelope/1/dsp8023_1.1.0.xsd http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd http://www.vmware.com/schema/ovf http://www.vmware.com/schema/ovf http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2.22.0/CIM_ResourceAllocationSettingData.xsd http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2.22.0/CIM_VirtualSystemSettingData.xsd">\n    <Link rel="up" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69" type="application/vnd.vmware.vcloud.vdc+xml"/>\n    <Link rel="catalogItem" href="https://localhost/api/catalogItem/d79fb542-6ad4-4c09-8cfc-f6104cbf67ad" type="application/vnd.vmware.vcloud.catalogItem+xml"/>\n    <Link rel="remove" href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435"/>\n    <Link rel="edit" href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435" type="application/vnd.vmware.vcloud.vAppTemplate+xml"/>\n    <Link rel="enable" 
href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435/action/enableDownload"/>\n    <Link rel="disable" href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435/action/disableDownload"/>\n    <Link rel="ovf" href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435/ovf" type="text/xml"/>\n    <Link rel="storageProfile" href="https://localhost/api/vdcStorageProfile/950701fb-2b8a-4808-80f1-27d1170a2bfc" name="*" type="application/vnd.vmware.vcloud.vdcStorageProfile+xml"/>\n    <Link rel="down" href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435/owner" type="application/vnd.vmware.vcloud.owner+xml"/>\n    <Link rel="down" href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>\n    <Link rel="down" href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435/productSections/" type="application/vnd.vmware.vcloud.productSections+xml"/>\n    <Description/>\n    <Owner type="application/vnd.vmware.vcloud.owner+xml">\n        <User href="https://localhost/api/admin/user/4e1905dc-7c0b-4013-b763-d01960853f49" name="system" type="application/vnd.vmware.admin.user+xml"/>\n    </Owner>\n    <Children>\n        <Vm goldMaster="false" status="8" name="Ubuntu_no_nic" id="urn:vcloud:vm:bd3fe155-3fb2-40a8-af48-89c276983166" href="https://localhost/api/vAppTemplate/vm-bd3fe155-3fb2-40a8-af48-89c276983166" type="application/vnd.vmware.vcloud.vm+xml">\n            <Link rel="up" href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435" type="application/vnd.vmware.vcloud.vAppTemplate+xml"/>\n            <Link rel="storageProfile" href="https://localhost/api/vdcStorageProfile/950701fb-2b8a-4808-80f1-27d1170a2bfc" type="application/vnd.vmware.vcloud.vdcStorageProfile+xml"/>\n            
<Link rel="down" href="https://localhost/api/vAppTemplate/vm-bd3fe155-3fb2-40a8-af48-89c276983166/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>\n            <Link rel="down" href="https://localhost/api/vAppTemplate/vm-bd3fe155-3fb2-40a8-af48-89c276983166/productSections/" type="application/vnd.vmware.vcloud.productSections+xml"/>\n            <Description/>\n            <NetworkConnectionSection href="https://localhost/api/vAppTemplate/vm-bd3fe155-3fb2-40a8-af48-89c276983166/networkConnectionSection/" type="application/vnd.vmware.vcloud.networkConnectionSection+xml" ovf:required="false">\n                <ovf:Info>Specifies the available VM network connections</ovf:Info>\n            </NetworkConnectionSection>\n            <GuestCustomizationSection href="https://localhost/api/vAppTemplate/vm-bd3fe155-3fb2-40a8-af48-89c276983166/guestCustomizationSection/" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml" ovf:required="false">\n                <ovf:Info>Specifies Guest OS Customization Settings</ovf:Info>\n                <Enabled>true</Enabled>\n                <ChangeSid>false</ChangeSid>\n                <VirtualMachineId>bd3fe155-3fb2-40a8-af48-89c276983166</VirtualMachineId>\n                <JoinDomainEnabled>false</JoinDomainEnabled>\n                <UseOrgSettings>false</UseOrgSettings>\n                <AdminPasswordEnabled>false</AdminPasswordEnabled>\n                <AdminPasswordAuto>true</AdminPasswordAuto>\n                <AdminAutoLogonEnabled>false</AdminAutoLogonEnabled>\n                <AdminAutoLogonCount>0</AdminAutoLogonCount>\n                <ResetPasswordRequired>false</ResetPasswordRequired>\n                <ComputerName>Ubuntunonic-001</ComputerName>\n            </GuestCustomizationSection>\n            <ovf:VirtualHardwareSection xmlns:vcloud="http://www.vmware.com/vcloud/v1.5" ovf:transport="" vcloud:type="application/vnd.vmware.vcloud.virtualHardwareSection+xml" 
vcloud:href="https://localhost/api/vAppTemplate/vm-bd3fe155-3fb2-40a8-af48-89c276983166/virtualHardwareSection/">\n                <ovf:Info>Virtual hardware requirements</ovf:Info>\n                <ovf:System>\n                    <vssd:ElementName>Virtual Hardware Family</vssd:ElementName>\n                    <vssd:InstanceID>0</vssd:InstanceID>\n                    <vssd:VirtualSystemIdentifier>Ubuntu_no_nic</vssd:VirtualSystemIdentifier>\n                    <vssd:VirtualSystemType>vmx-11</vssd:VirtualSystemType>\n                </ovf:System>\n                <ovf:Item>\n                    <rasd:Address>0</rasd:Address>\n                    <rasd:Description>SCSI Controller</rasd:Description>\n                    <rasd:ElementName>SCSI Controller 0</rasd:ElementName>\n                    <rasd:InstanceID>1</rasd:InstanceID>\n                    <rasd:ResourceSubType>lsilogic</rasd:ResourceSubType>\n                    <rasd:ResourceType>6</rasd:ResourceType>\n                </ovf:Item>\n                <ovf:Item>\n                    <rasd:AddressOnParent>0</rasd:AddressOnParent>\n                    <rasd:Description>Hard disk</rasd:Description>\n                    <rasd:ElementName>Hard disk 1</rasd:ElementName>\n                    <rasd:HostResource vcloud:storageProfileHref="https://localhost/api/vdcStorageProfile/950701fb-2b8a-4808-80f1-27d1170a2bfc" vcloud:busType="6" vcloud:busSubType="lsilogic" vcloud:capacity="5120" vcloud:storageProfileOverrideVmDefault="false"/>\n                    <rasd:InstanceID>2000</rasd:InstanceID>\n                    <rasd:Parent>1</rasd:Parent>\n                    <rasd:ResourceType>17</rasd:ResourceType>\n                    <rasd:VirtualQuantity>5368709120</rasd:VirtualQuantity>\n                    <rasd:VirtualQuantityUnits>byte</rasd:VirtualQuantityUnits>\n                </ovf:Item>\n                <ovf:Item>\n                    <rasd:Address>1</rasd:Address>\n                    <rasd:Description>IDE 
Controller</rasd:Description>\n                    <rasd:ElementName>IDE Controller 1</rasd:ElementName>\n                    <rasd:InstanceID>2</rasd:InstanceID>\n                    <rasd:ResourceType>5</rasd:ResourceType>\n                </ovf:Item>\n                <ovf:Item>\n                    <rasd:AddressOnParent>0</rasd:AddressOnParent>\n                    <rasd:AutomaticAllocation>false</rasd:AutomaticAllocation>\n                    <rasd:Description>CD/DVD Drive</rasd:Description>\n                    <rasd:ElementName>CD/DVD Drive 1</rasd:ElementName>\n                    <rasd:HostResource/>\n                    <rasd:InstanceID>3002</rasd:InstanceID>\n                    <rasd:Parent>2</rasd:Parent>\n                    <rasd:ResourceType>15</rasd:ResourceType>\n                </ovf:Item>\n                <ovf:Item>\n                    <rasd:AddressOnParent>0</rasd:AddressOnParent>\n                    <rasd:AutomaticAllocation>false</rasd:AutomaticAllocation>\n                    <rasd:Description>Floppy Drive</rasd:Description>\n                    <rasd:ElementName>Floppy Drive 1</rasd:ElementName>\n                    <rasd:HostResource/>\n                    <rasd:InstanceID>8000</rasd:InstanceID>\n                    <rasd:ResourceType>14</rasd:ResourceType>\n                </ovf:Item>\n                <ovf:Item>\n                    <rasd:AllocationUnits>hertz * 10^6</rasd:AllocationUnits>\n                    <rasd:Description>Number of Virtual CPUs</rasd:Description>\n                    <rasd:ElementName>1 virtual CPU(s)</rasd:ElementName>\n                    <rasd:InstanceID>3</rasd:InstanceID>\n                    <rasd:Reservation>0</rasd:Reservation>\n                    <rasd:ResourceType>3</rasd:ResourceType>\n                    <rasd:VirtualQuantity>1</rasd:VirtualQuantity>\n                    <rasd:Weight>0</rasd:Weight>\n                    <vmw:CoresPerSocket ovf:required="false">1</vmw:CoresPerSocket>\n                
</ovf:Item>\n                <ovf:Item>\n                    <rasd:AllocationUnits>byte * 2^20</rasd:AllocationUnits>\n                    <rasd:Description>Memory Size</rasd:Description>\n                    <rasd:ElementName>1024 MB of memory</rasd:ElementName>\n                    <rasd:InstanceID>4</rasd:InstanceID>\n                    <rasd:Reservation>0</rasd:Reservation>\n                    <rasd:ResourceType>4</rasd:ResourceType>\n                    <rasd:VirtualQuantity>1024</rasd:VirtualQuantity>\n                    <rasd:Weight>0</rasd:Weight>\n                </ovf:Item>\n                <Link rel="down" href="https://localhost/api/vAppTemplate/vm-bd3fe155-3fb2-40a8-af48-89c276983166/virtualHardwareSection/cpu" type="application/vnd.vmware.vcloud.rasdItem+xml"/>\n                <Link rel="down" href="https://localhost/api/vAppTemplate/vm-bd3fe155-3fb2-40a8-af48-89c276983166/virtualHardwareSection/memory" type="application/vnd.vmware.vcloud.rasdItem+xml"/>\n                <Link rel="down" href="https://localhost/api/vAppTemplate/vm-bd3fe155-3fb2-40a8-af48-89c276983166/virtualHardwareSection/disks" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/>\n                <Link rel="down" href="https://localhost/api/vAppTemplate/vm-bd3fe155-3fb2-40a8-af48-89c276983166/virtualHardwareSection/media" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/>\n                <Link rel="down" href="https://localhost/api/vAppTemplate/vm-bd3fe155-3fb2-40a8-af48-89c276983166/virtualHardwareSection/networkCards" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/>\n                <Link rel="down" href="https://localhost/api/vAppTemplate/vm-bd3fe155-3fb2-40a8-af48-89c276983166/virtualHardwareSection/serialPorts" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/>\n            </ovf:VirtualHardwareSection>\n            <VAppScopedLocalId>Ubuntu_no_nic</VAppScopedLocalId>\n            <DateCreated>2017-10-14T23:52:58.790-07:00</DateCreated>\n        
</Vm>\n    </Children>\n    <ovf:NetworkSection xmlns:vcloud="http://www.vmware.com/vcloud/v1.5" vcloud:type="application/vnd.vmware.vcloud.networkSection+xml" vcloud:href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435/networkSection/">\n        <ovf:Info>The list of logical networks</ovf:Info>\n    </ovf:NetworkSection>\n    <NetworkConfigSection href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435/networkConfigSection/" type="application/vnd.vmware.vcloud.networkConfigSection+xml" ovf:required="false">\n        <ovf:Info>The configuration parameters for logical networks</ovf:Info>\n    </NetworkConfigSection>\n    <LeaseSettingsSection href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435/leaseSettingsSection/" type="application/vnd.vmware.vcloud.leaseSettingsSection+xml" ovf:required="false">\n        <ovf:Info>Lease settings section</ovf:Info>\n        <Link rel="edit" href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435/leaseSettingsSection/" type="application/vnd.vmware.vcloud.leaseSettingsSection+xml"/>\n        <StorageLeaseInSeconds>7776000</StorageLeaseInSeconds>\n        <StorageLeaseExpiration>2018-08-22T02:41:54.567-07:00</StorageLeaseExpiration>\n    </LeaseSettingsSection>\n    <CustomizationSection goldMaster="false" href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435/customizationSection/" type="application/vnd.vmware.vcloud.customizationSection+xml" ovf:required="false">\n        <ovf:Info>VApp template customization section</ovf:Info>\n        <CustomizeOnInstantiate>true</CustomizeOnInstantiate>\n    </CustomizationSection>\n    <DateCreated>2017-10-14T23:52:58.790-07:00</DateCreated>\n</VAppTemplate>\n"""\r
+\r
+deployed_vapp_xml = """<?xml version="1.0" encoding="UTF-8"?>\n<VApp xmlns="http://www.vmware.com/vcloud/v1.5" ovfDescriptorUploaded="true" deployed="false" status="0" name="Test1_vm-978d608b-07e4-4733-9c15-b66bc8ee310a" id="urn:vcloud:vapp:8b3ab861-cc53-4bd8-bdd0-85a74af76c61" href="https://localhost/api/vApp/vapp-8b3ab861-cc53-4bd8-bdd0-85a74af76c61" type="application/vnd.vmware.vcloud.vApp+xml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">\n    <Link rel="down" href="https://localhost/api/vApp/vapp-8b3ab861-cc53-4bd8-bdd0-85a74af76c61/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\n    <Link rel="up" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69" type="application/vnd.vmware.vcloud.vdc+xml"/>\n    <Link rel="down" href="https://localhost/api/vApp/vapp-8b3ab861-cc53-4bd8-bdd0-85a74af76c61/owner" type="application/vnd.vmware.vcloud.owner+xml"/>\n    <Link rel="down" href="https://localhost/api/vApp/vapp-8b3ab861-cc53-4bd8-bdd0-85a74af76c61/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>\n    <Link rel="ovf" href="https://localhost/api/vApp/vapp-8b3ab861-cc53-4bd8-bdd0-85a74af76c61/ovf" type="text/xml"/>\n    <Link rel="down" href="https://localhost/api/vApp/vapp-8b3ab861-cc53-4bd8-bdd0-85a74af76c61/productSections/" type="application/vnd.vmware.vcloud.productSections+xml"/>\n    <Description>Vapp instantiation</Description>\n    <Tasks>\n        <Task cancelRequested="false" expiryTime="2018-08-31T01:14:34.292-07:00" operation="Creating Virtual Application Test1_vm-978d608b-07e4-4733-9c15-b66bc8ee310a(8b3ab861-cc53-4bd8-bdd0-85a74af76c61)" operationName="vdcInstantiateVapp" serviceNamespace="com.vmware.vcloud" startTime="2018-06-02T01:14:34.292-07:00" status="queued" name="task" id="urn:vcloud:task:1d588451-6b7d-43f4-b8c7-c9155dcd715a" 
href="https://localhost/api/task/1d588451-6b7d-43f4-b8c7-c9155dcd715a" type="application/vnd.vmware.vcloud.task+xml">\n            <Owner href="https://localhost/api/vApp/vapp-8b3ab861-cc53-4bd8-bdd0-85a74af76c61" name="Test1_vm-978d608b-07e4-4733-9c15-b66bc8ee310a" type="application/vnd.vmware.vcloud.vApp+xml"/>\n            <User href="https://localhost/api/admin/user/f7b6beba-96db-4674-b187-675ed1873c8c" name="orgadmin" type="application/vnd.vmware.admin.user+xml"/>\n            <Organization href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac" name="Org3" type="application/vnd.vmware.vcloud.org+xml"/>\n            <Progress>1</Progress>\n            <Details/>\n        </Task>\n    </Tasks>\n    <DateCreated>2018-06-02T01:14:32.870-07:00</DateCreated>\n    <Owner type="application/vnd.vmware.vcloud.owner+xml">\n        <User href="https://localhost/api/admin/user/f7b6beba-96db-4674-b187-675ed1873c8c" name="orgadmin" type="application/vnd.vmware.admin.user+xml"/>\n    </Owner>\n    <InMaintenanceMode>false</InMaintenanceMode>\n</VApp>"""\r
diff --git a/RO-VIM-vmware/osm_rovim_vmware/vimconn_vmware.py b/RO-VIM-vmware/osm_rovim_vmware/vimconn_vmware.py
new file mode 100644 (file)
index 0000000..e37c419
--- /dev/null
@@ -0,0 +1,6616 @@
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2016-2017 VMware Inc.
+# This file is part of ETSI OSM
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact:  osslegalrouting@vmware.com
+##
+
+"""
+vimconn_vmware implementation an Abstract class in order to interact with VMware  vCloud Director.
+mbayramov@vmware.com
+"""
+from progressbar import Percentage, Bar, ETA, FileTransferSpeed, ProgressBar
+
+from osm_ro import vimconn
+import os
+import shutil
+import subprocess
+import tempfile
+import traceback
+import itertools
+import requests
+import ssl
+import atexit
+
+from pyVmomi import vim, vmodl
+from pyVim.connect import SmartConnect, Disconnect
+
+from xml.etree import ElementTree as XmlElementTree
+from lxml import etree as lxmlElementTree
+
+import yaml
+from pyvcloud.vcd.client import BasicLoginCredentials,Client,VcdTaskException
+from pyvcloud.vcd.vdc import VDC
+from pyvcloud.vcd.org import Org
+import re
+from pyvcloud.vcd.vapp import VApp
+from xml.sax.saxutils import escape
+import logging
+import json
+import time
+import uuid
+# import httplib
+#For python3
+#import http.client
+import hashlib
+import socket
+import struct
+import netaddr
+import random
+
+# global variable for vcd connector type
+STANDALONE = 'standalone'
+
+# key for flavor dicts
+FLAVOR_RAM_KEY = 'ram'
+FLAVOR_VCPUS_KEY = 'vcpus'
+FLAVOR_DISK_KEY = 'disk'
+# defaults applied when the caller provides no explicit IP profile for a network
+# NOTE(review): 'dhcp_count' presumably is the size of the DHCP address pool -- confirm at usage site
+DEFAULT_IP_PROFILE = {'dhcp_count':50,
+                      'dhcp_enabled':True,
+                      'ip_version':"IPv4"
+                      }
+# global variable for wait time
+# NOTE(review): these look like seconds (poll interval / overall timeout) -- confirm against the wait loops
+INTERVAL_TIME = 5
+MAX_WAIT_TIME = 1800
+
+# vCloud Director REST API version requested by this connector
+API_VERSION = '27.0'
+
+__author__ = "Mustafa Bayramov, Arpita Kate, Sachin Bhangare, Prakash Kasar"
+__date__ = "$09-Mar-2018 11:09:29$"
+__version__ = '0.2'
+
+# vCloud Director vApp/VM status codes, kept here for reference:
+#     -1: "Could not be created",
+#     0: "Unresolved",
+#     1: "Resolved",
+#     2: "Deployed",
+#     3: "Suspended",
+#     4: "Powered on",
+#     5: "Waiting for user input",
+#     6: "Unknown state",
+#     7: "Unrecognized state",
+#     8: "Powered off",
+#     9: "Inconsistent state",
+#     10: "Children do not all have the same status",
+#     11: "Upload initiated, OVF descriptor pending",
+#     12: "Upload initiated, copying contents",
+#     13: "Upload initiated , disk contents pending",
+#     14: "Upload has been quarantined",
+#     15: "Upload quarantine period has expired"
+
+# mapping vCD status to MANO
+vcdStatusCode2manoFormat = {4: 'ACTIVE',
+                            7: 'PAUSED',
+                            3: 'SUSPENDED',
+                            8: 'INACTIVE',
+                            12: 'BUILD',
+                            -1: 'ERROR',
+                            14: 'DELETED'}
+
+# mapping of network status strings to MANO format (identity map; other states pass through unchanged)
+netStatus2manoFormat = {'ACTIVE': 'ACTIVE', 'PAUSED': 'PAUSED', 'INACTIVE': 'INACTIVE', 'BUILD': 'BUILD',
+                        'ERROR': 'ERROR', 'DELETED': 'DELETED'
+                        }
+
+class vimconnector(vimconn.vimconnector):
+    # dict used to store flavor in memory
+    # NOTE(review): this is a CLASS-level mutable dict, shared by every
+    # vimconnector instance in the process (flavors created through one
+    # connector are visible to all others) -- confirm the sharing is intended.
+    flavorlist = {}
+
+    def __init__(self, uuid=None, name=None, tenant_id=None, tenant_name=None,
+                 url=None, url_admin=None, user=None, passwd=None, log_level=None, config={}, persistent_info={}):
+        """
+        Constructor create vmware connector to vCloud director.
+
+        By default construct doesn't validate connection state. So client can create object with None arguments.
+        If client specified username , password and host and VDC name.  Connector initialize other missing attributes.
+
+        a) It initialize organization UUID
+        b) Initialize tenant_id/vdc ID.   (This information derived from tenant name)
+
+        Args:
+            uuid - is organization uuid.
+            name - is organization name that must be presented in vCloud director.
+            tenant_id - is VDC uuid it must be presented in vCloud director
+            tenant_name - is VDC name.
+            url - is hostname or ip address of vCloud director
+            url_admin - same as above.
+            user - is user that administrator for organization. Caller must make sure that
+                    username has right privileges.
+
+            password - is password for a user.
+
+            VMware connector also requires PVDC administrative privileges and separate account.
+            This variables must be passed via config argument dict contains keys
+
+            dict['admin_username']
+            dict['admin_password']
+            config - Provide NSX and vCenter information
+
+            Returns:
+                Nothing.
+        """
+
+        vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url,
+                                      url_admin, user, passwd, log_level, config)
+
+        self.logger = logging.getLogger('openmano.vim.vmware')
+        self.logger.setLevel(10)
+        self.persistent_info = persistent_info
+
+        self.name = name
+        self.id = uuid
+        self.url = url
+        self.url_admin = url_admin
+        self.tenant_id = tenant_id
+        self.tenant_name = tenant_name
+        self.user = user
+        self.passwd = passwd
+        self.config = config
+        self.admin_password = None
+        self.admin_user = None
+        self.org_name = ""
+        self.nsx_manager = None
+        self.nsx_user = None
+        self.nsx_password = None
+        self.availability_zone = None
+
+        # Disable warnings from self-signed certificates.
+        requests.packages.urllib3.disable_warnings()
+
+        if tenant_name is not None:
+            orgnameandtenant = tenant_name.split(":")
+            if len(orgnameandtenant) == 2:
+                self.tenant_name = orgnameandtenant[1]
+                self.org_name = orgnameandtenant[0]
+            else:
+                self.tenant_name = tenant_name
+        if "orgname" in config:
+            self.org_name = config['orgname']
+
+        if log_level:
+            self.logger.setLevel(getattr(logging, log_level))
+
+        try:
+            self.admin_user = config['admin_username']
+            self.admin_password = config['admin_password']
+        except KeyError:
+            raise vimconn.vimconnException(message="Error admin username or admin password is empty.")
+
+        try:
+            self.nsx_manager = config['nsx_manager']
+            self.nsx_user = config['nsx_user']
+            self.nsx_password = config['nsx_password']
+        except KeyError:
+            raise vimconn.vimconnException(message="Error: nsx manager or nsx user or nsx password is empty in Config")
+
+        self.vcenter_ip = config.get("vcenter_ip", None)
+        self.vcenter_port = config.get("vcenter_port", None)
+        self.vcenter_user = config.get("vcenter_user", None)
+        self.vcenter_password = config.get("vcenter_password", None)
+
+        #Set availability zone for Affinity rules
+        self.availability_zone = self.set_availability_zones()
+
+# ############# Stub code for SRIOV #################
+#         try:
+#             self.dvs_name = config['dv_switch_name']
+#         except KeyError:
+#             raise vimconn.vimconnException(message="Error: distributed virtaul switch name is empty in Config")
+#
+#         self.vlanID_range = config.get("vlanID_range", None)
+
+        self.org_uuid = None
+        self.client = None
+
+        if not url:
+            raise vimconn.vimconnException('url param can not be NoneType')
+
+        if not self.url_admin:  # try to use normal url
+            self.url_admin = self.url
+
+        logging.debug("UUID: {} name: {} tenant_id: {} tenant name {}".format(self.id, self.org_name,
+                                                                              self.tenant_id, self.tenant_name))
+        logging.debug("vcd url {} vcd username: {} vcd password: {}".format(self.url, self.user, self.passwd))
+        logging.debug("vcd admin username {} vcd admin passowrd {}".format(self.admin_user, self.admin_password))
+
+        # initialize organization
+        if self.user is not None and self.passwd is not None and self.url:
+            self.init_organization()
+
+    def __getitem__(self, index):
+        if index == 'name':
+            return self.name
+        if index == 'tenant_id':
+            return self.tenant_id
+        if index == 'tenant_name':
+            return self.tenant_name
+        elif index == 'id':
+            return self.id
+        elif index == 'org_name':
+            return self.org_name
+        elif index == 'org_uuid':
+            return self.org_uuid
+        elif index == 'user':
+            return self.user
+        elif index == 'passwd':
+            return self.passwd
+        elif index == 'url':
+            return self.url
+        elif index == 'url_admin':
+            return self.url_admin
+        elif index == "config":
+            return self.config
+        else:
+            raise KeyError("Invalid key '{}'".format(index))
+
+    def __setitem__(self, index, value):
+        if index == 'name':
+            self.name = value
+        if index == 'tenant_id':
+            self.tenant_id = value
+        if index == 'tenant_name':
+            self.tenant_name = value
+        elif index == 'id':
+            self.id = value
+        elif index == 'org_name':
+            self.org_name = value
+        elif index == 'org_uuid':
+            self.org_uuid = value
+        elif index == 'user':
+            self.user = value
+        elif index == 'passwd':
+            self.passwd = value
+        elif index == 'url':
+            self.url = value
+        elif index == 'url_admin':
+            self.url_admin = value
+        else:
+            raise KeyError("Invalid key '{}'".format(index))
+
+    def connect_as_admin(self):
+        """ Method connect as pvdc admin user to vCloud director.
+            There are certain action that can be done only by provider vdc admin user.
+            Organization creation / provider network creation etc.
+
+            Returns:
+                The return client object that latter can be used to connect to vcloud director as admin for provider vdc
+        """
+        self.logger.debug("Logging into vCD {} as admin.".format(self.org_name))
+
+        try:
+            host = self.url
+            org = 'System'
+            client_as_admin = Client(host, verify_ssl_certs=False)
+            client_as_admin.set_highest_supported_version()
+            client_as_admin.set_credentials(BasicLoginCredentials(self.admin_user, org, self.admin_password))
+        except Exception as e:
+            raise vimconn.vimconnException(
+                  "Can't connect to a vCloud director as: {} with exception {}".format(self.admin_user, e))
+
+        return client_as_admin
+
+    def connect(self):
+        """ Method connect as normal user to vCloud director.
+
+            Returns:
+                The return client object that latter can be used to connect to vCloud director as admin for VDC
+        """
+        try:
+            self.logger.debug("Logging into vCD {} as {} to datacenter {}.".format(self.org_name,
+                                                                                      self.user,
+                                                                                      self.org_name))
+            host = self.url
+            client = Client(host, verify_ssl_certs=False)
+            client.set_highest_supported_version()
+            client.set_credentials(BasicLoginCredentials(self.user, self.org_name, self.passwd))
+        except:
+            raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
+                                                     "{} as user: {}".format(self.org_name, self.user))
+
+        return client
+
+    def init_organization(self):
+        """ Method initialize organization UUID and VDC parameters.
+
+            At bare minimum client must provide organization name that present in vCloud director and VDC.
+
+            The VDC - UUID ( tenant_id) will be initialized at the run time if client didn't call constructor.
+            The Org - UUID will be initialized at the run time if data center present in vCloud director.
+
+            Returns:
+                The return vca object that letter can be used to connect to vcloud direct as admin
+        """
+        client = self.connect()
+        if not client:
+            raise vimconn.vimconnConnectionException("Failed to connect vCD.")
+
+        self.client = client
+        try:
+            if self.org_uuid is None:
+                org_list = client.get_org_list()
+                for org in org_list.Org:
+                    # we set org UUID at the init phase but we can do it only when we have valid credential.
+                    if org.get('name') == self.org_name:
+                        self.org_uuid = org.get('href').split('/')[-1]
+                        self.logger.debug("Setting organization UUID {}".format(self.org_uuid))
+                        break
+                else:
+                    raise vimconn.vimconnException("Vcloud director organization {} not found".format(self.org_name))
+
+                # if well good we require for org details
+                org_details_dict = self.get_org(org_uuid=self.org_uuid)
+
+                # we have two case if we want to initialize VDC ID or VDC name at run time
+                # tenant_name provided but no tenant id
+                if self.tenant_id is None and self.tenant_name is not None and 'vdcs' in org_details_dict:
+                    vdcs_dict = org_details_dict['vdcs']
+                    for vdc in vdcs_dict:
+                        if vdcs_dict[vdc] == self.tenant_name:
+                            self.tenant_id = vdc
+                            self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
+                                                                                                    self.org_name))
+                            break
+                    else:
+                        raise vimconn.vimconnException("Tenant name indicated but not present in vcloud director.")
+                    # case two we have tenant_id but we don't have tenant name so we find and set it.
+                    if self.tenant_id is not None and self.tenant_name is None and 'vdcs' in org_details_dict:
+                        vdcs_dict = org_details_dict['vdcs']
+                        for vdc in vdcs_dict:
+                            if vdc == self.tenant_id:
+                                self.tenant_name = vdcs_dict[vdc]
+                                self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
+                                                                                                        self.org_name))
+                                break
+                        else:
+                            raise vimconn.vimconnException("Tenant id indicated but not present in vcloud director")
+            self.logger.debug("Setting organization uuid {}".format(self.org_uuid))
+        except:
+            self.logger.debug("Failed initialize organization UUID for org {}".format(self.org_name))
+            self.logger.debug(traceback.format_exc())
+            self.org_uuid = None
+
+    def new_tenant(self, tenant_name=None, tenant_description=None):
+        """ Method adds a new tenant to VIM with this name.
+            This action requires access to create VDC action in vCloud director.
+
+            Args:
+                tenant_name is tenant_name to be created.
+                tenant_description not used for this call
+
+            Return:
+                returns the tenant identifier in UUID format.
+                If action is failed method will throw vimconn.vimconnException method
+            """
+        vdc_task = self.create_vdc(vdc_name=tenant_name)
+        if vdc_task is not None:
+            vdc_uuid, value = vdc_task.popitem()
+            self.logger.info("Created new vdc {} and uuid: {}".format(tenant_name, vdc_uuid))
+            return vdc_uuid
+        else:
+            raise vimconn.vimconnException("Failed create tenant {}".format(tenant_name))
+
    def delete_tenant(self, tenant_id=None):
        """Delete a tenant (VDC) from the VIM.

        Args:
            tenant_id: UUID of the tenant/VDC to be deleted.

        Returns:
            str: the tenant identifier (UUID) that was deleted.

        Raises:
            vimconn.vimconnConnectionException: cannot connect as admin.
            vimconn.vimconnNotFoundException: tenant_id is None or not found.
            vimconn.vimconnException: the DELETE REST call failed.
        """
        # deleting a VDC requires provider-admin privileges
        vca = self.connect_as_admin()
        if not vca:
            raise vimconn.vimconnConnectionException("Failed to connect vCD")

        if tenant_id is not None:
            if vca._session:
                # fetch the OrgVDC record to discover its 'remove' link
                url_list = [self.url, '/api/vdc/', tenant_id]
                orgvdc_herf = ''.join(url_list)

                headers = {'Accept':'application/*+xml;version=' + API_VERSION,
                           'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
                response = self.perform_request(req_type='GET',
                                                url=orgvdc_herf,
                                                headers=headers)

                if response.status_code != requests.codes.ok:
                    self.logger.debug("delete_tenant():GET REST API call {} failed. "\
                                      "Return status code {}".format(orgvdc_herf,
                                                                     response.status_code))
                    raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))

                lxmlroot_respond = lxmlElementTree.fromstring(response.content)
                # keep only explicitly-prefixed namespaces, then map the default
                # vCloud namespace to the 'xmlns' prefix so find() can use it
                namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
                namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
                vdc_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']
                # force a recursive delete so a non-empty VDC is removed as well
                vdc_remove_href = vdc_remove_href + '?recursive=true&force=true'

                response = self.perform_request(req_type='DELETE',
                                                url=vdc_remove_href,
                                                headers=headers)

                if response.status_code == 202:
                    # 202 Accepted: deletion is asynchronous, give vCD a moment
                    time.sleep(5)
                    return tenant_id
                else:
                    self.logger.debug("delete_tenant(): DELETE REST API call {} failed. "\
                                      "Return status code {}".format(vdc_remove_href,
                                                                     response.status_code))
                    raise vimconn.vimconnException("Fail to delete tenant with ID {}".format(tenant_id))
        else:
            self.logger.debug("delete_tenant():Incorrect tenant ID  {}".format(tenant_id))
            raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))
+
+
+    def get_tenant_list(self, filter_dict={}):
+        """Obtain tenants of VIM
+        filter_dict can contain the following keys:
+            name: filter by tenant name
+            id: filter by tenant uuid/id
+            <other VIM specific>
+        Returns the tenant list of dictionaries:
+            [{'name':'<name>, 'id':'<id>, ...}, ...]
+
+        """
+        org_dict = self.get_org(self.org_uuid)
+        vdcs_dict = org_dict['vdcs']
+
+        vdclist = []
+        try:
+            for k in vdcs_dict:
+                entry = {'name': vdcs_dict[k], 'id': k}
+                # if caller didn't specify dictionary we return all tenants.
+                if filter_dict is not None and filter_dict:
+                    filtered_entry = entry.copy()
+                    filtered_dict = set(entry.keys()) - set(filter_dict)
+                    for unwanted_key in filtered_dict: del entry[unwanted_key]
+                    if filter_dict == entry:
+                        vdclist.append(filtered_entry)
+                else:
+                    vdclist.append(entry)
+        except:
+            self.logger.debug("Error in get_tenant_list()")
+            self.logger.debug(traceback.format_exc())
+            raise vimconn.vimconnException("Incorrect state. {}")
+
+        return vdclist
+
+    def new_network(self, net_name, net_type, ip_profile=None, shared=False, vlan=None):
+        """Adds a tenant network to VIM
+        Params:
+            'net_name': name of the network
+            'net_type': one of:
+                'bridge': overlay isolated network
+                'data':   underlay E-LAN network for Passthrough and SRIOV interfaces
+                'ptp':    underlay E-LINE network for Passthrough and SRIOV interfaces.
+            'ip_profile': is a dict containing the IP parameters of the network
+                'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
+                'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
+                'gateway_address': (Optional) ip_schema, that is X.X.X.X
+                'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
+                'dhcp_enabled': True or False
+                'dhcp_start_address': ip_schema, first IP to grant
+                'dhcp_count': number of IPs to grant.
+            'shared': if this network can be seen/use by other tenants/organization
+            'vlan': in case of a data or ptp net_type, the intended vlan tag to be used for the network
+        Returns a tuple with the network identifier and created_items, or raises an exception on error
+            created_items can be None or a dictionary where this method can include key-values that will be passed to
+            the method delete_network. Can be used to store created segments, created l2gw connections, etc.
+            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+            as not present.
+        """
+
+        self.logger.debug("new_network tenant {} net_type {} ip_profile {} shared {}"
+                          .format(net_name, net_type, ip_profile, shared))
+
+        created_items = {}
+        isshared = 'false'
+        if shared:
+            isshared = 'true'
+
+# ############# Stub code for SRIOV #################
+#         if net_type == "data" or net_type == "ptp":
+#             if self.config.get('dv_switch_name') == None:
+#                  raise vimconn.vimconnConflictException("You must provide 'dv_switch_name' at config value")
+#             network_uuid = self.create_dvPort_group(net_name)
+
+        network_uuid = self.create_network(network_name=net_name, net_type=net_type,
+                                           ip_profile=ip_profile, isshared=isshared)
+        if network_uuid is not None:
+            return network_uuid, created_items
+        else:
+            raise vimconn.vimconnUnexpectedResponse("Failed create a new network {}".format(net_name))
+
+    def get_vcd_network_list(self):
+        """ Method available organization for a logged in tenant
+
+            Returns:
+                The return vca object that letter can be used to connect to vcloud direct as admin
+        """
+
+        self.logger.debug("get_vcd_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
+
+        if not self.tenant_name:
+            raise vimconn.vimconnConnectionException("Tenant name is empty.")
+
+        org, vdc = self.get_vdc_details()
+        if vdc is None:
+            raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}".format(self.tenant_name))
+
+        vdc_uuid = vdc.get('id').split(":")[3]
+        if self.client._session:
+                headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+                response = self.perform_request(req_type='GET',
+                                           url=vdc.get('href'),
+                                               headers=headers)
+        if response.status_code != 200:
+            self.logger.error("Failed to get vdc content")
+            raise vimconn.vimconnNotFoundException("Failed to get vdc content")
+        else:
+            content = XmlElementTree.fromstring(response.content)
+
+        network_list = []
+        try:
+            for item in content:
+                if item.tag.split('}')[-1] == 'AvailableNetworks':
+                    for net in item:
+                        response = self.perform_request(req_type='GET',
+                                                   url=net.get('href'),
+                                                       headers=headers)
+
+                        if response.status_code != 200:
+                            self.logger.error("Failed to get network content")
+                            raise vimconn.vimconnNotFoundException("Failed to get network content")
+                        else:
+                            net_details = XmlElementTree.fromstring(response.content)
+
+                            filter_dict = {}
+                            net_uuid = net_details.get('id').split(":")
+                            if len(net_uuid) != 4:
+                                continue
+                            else:
+                                net_uuid = net_uuid[3]
+                                # create dict entry
+                                self.logger.debug("get_vcd_network_list(): Adding network {} "
+                                                  "to a list vcd id {} network {}".format(net_uuid,
+                                                                                          vdc_uuid,
+                                                                                          net_details.get('name')))
+                                filter_dict["name"] = net_details.get('name')
+                                filter_dict["id"] = net_uuid
+                                if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
+                                    shared = True
+                                else:
+                                    shared = False
+                                filter_dict["shared"] = shared
+                                filter_dict["tenant_id"] = vdc_uuid
+                                if int(net_details.get('status')) == 1:
+                                    filter_dict["admin_state_up"] = True
+                                else:
+                                    filter_dict["admin_state_up"] = False
+                                filter_dict["status"] = "ACTIVE"
+                                filter_dict["type"] = "bridge"
+                                network_list.append(filter_dict)
+                                self.logger.debug("get_vcd_network_list adding entry {}".format(filter_dict))
+        except:
+            self.logger.debug("Error in get_vcd_network_list", exc_info=True)
+            pass
+
+        self.logger.debug("get_vcd_network_list returning {}".format(network_list))
+        return network_list
+
    def get_network_list(self, filter_dict={}):
        """Obtain tenant networks of VIM
        Filter_dict can be:
            name: network name  OR/AND
            id: network uuid    OR/AND
            shared: boolean     OR/AND
            tenant_id: tenant   OR/AND
            admin_state_up: boolean
            status: 'ACTIVE'

        [{key : value , key : value}]

        Returns the network list of dictionaries:
            [{<the fields at Filter_dict plus some VIM specific>}, ...]
            List can be empty

        Raises vimconnConnectionException when no tenant/VDC is available and
        vimconnNotFoundException when vdc/network content cannot be read.
        """

        self.logger.debug("get_network_list(): retrieving network list for vcd {}".format(self.tenant_name))

        if not self.tenant_name:
            raise vimconn.vimconnConnectionException("Tenant name is empty.")

        org, vdc = self.get_vdc_details()
        if vdc is None:
            raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}.".format(self.tenant_name))

        try:
            vdcid = vdc.get('id').split(":")[3]

            # NOTE(review): 'headers'/'response' are only bound when a session
            # exists but are used unconditionally below; a missing session
            # would raise NameError (caught by the except clause) — confirm.
            if self.client._session:
                headers = {'Accept':'application/*+xml;version=' + API_VERSION,
                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
                response = self.perform_request(req_type='GET',
                                           url=vdc.get('href'),
                                               headers=headers)
            if response.status_code != 200:
                self.logger.error("Failed to get vdc content")
                raise vimconn.vimconnNotFoundException("Failed to get vdc content")
            else:
                content = XmlElementTree.fromstring(response.content)

            network_list = []
            for item in content:
                if item.tag.split('}')[-1] == 'AvailableNetworks':
                    for net in item:
                        # fetch the full record of each available network
                        response = self.perform_request(req_type='GET',
                                                   url=net.get('href'),
                                                       headers=headers)

                        if response.status_code != 200:
                            self.logger.error("Failed to get network content")
                            raise vimconn.vimconnNotFoundException("Failed to get network content")
                        else:
                            net_details = XmlElementTree.fromstring(response.content)

                            filter_entry = {}
                            # ids look like 'urn:vcloud:network:<uuid>'; skip other formats
                            net_uuid = net_details.get('id').split(":")
                            if len(net_uuid) != 4:
                                continue
                            else:
                                net_uuid = net_uuid[3]
                                # create dict entry
                                self.logger.debug("get_network_list(): Adding net {}"
                                                  " to a list vcd id {} network {}".format(net_uuid,
                                                                                           vdcid,
                                                                                           net_details.get('name')))
                                filter_entry["name"] = net_details.get('name')
                                filter_entry["id"] = net_uuid
                                if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
                                    shared = True
                                else:
                                    shared = False
                                filter_entry["shared"] = shared
                                filter_entry["tenant_id"] = vdcid
                                # vCD status '1' means the network is up
                                if int(net_details.get('status')) == 1:
                                    filter_entry["admin_state_up"] = True
                                else:
                                    filter_entry["admin_state_up"] = False
                                filter_entry["status"] = "ACTIVE"
                                filter_entry["type"] = "bridge"
                                # keep a full copy: the filtering below mutates filter_entry
                                filtered_entry = filter_entry.copy()

                                if filter_dict is not None and filter_dict:
                                    # we remove all the key : value we don't care and match only
                                    # respected field
                                    filtered_dict = set(filter_entry.keys()) - set(filter_dict)
                                    for unwanted_key in filtered_dict: del filter_entry[unwanted_key]
                                    if filter_dict == filter_entry:
                                        network_list.append(filtered_entry)
                                else:
                                    network_list.append(filtered_entry)
        except Exception as e:
            self.logger.debug("Error in get_network_list",exc_info=True)
            if isinstance(e, vimconn.vimconnException):
                raise
            else:
                raise vimconn.vimconnNotFoundException("Failed : Networks list not found {} ".format(e))

        self.logger.debug("Returning {}".format(network_list))
        return network_list
+
    def get_network(self, net_id):
        """Obtain network details of net_id VIM network.

        Returns a dict with the fields described in get_network_list()
        ('name', 'id', 'shared', 'tenant_id', 'admin_state_up', 'status',
        'type') for the matching network; raises
        vimconn.vimconnNotFoundException when it cannot be found.
        """

        try:
            org, vdc = self.get_vdc_details()
            vdc_id = vdc.get('id').split(":")[3]
            # NOTE(review): 'headers'/'response' are only bound when a session
            # exists but are used unconditionally below; a missing session
            # would raise NameError (caught by the except clause) — confirm.
            if self.client._session:
                headers = {'Accept':'application/*+xml;version=' + API_VERSION,
                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
                response = self.perform_request(req_type='GET',
                                           url=vdc.get('href'),
                                               headers=headers)
            if response.status_code != 200:
                self.logger.error("Failed to get vdc content")
                raise vimconn.vimconnNotFoundException("Failed to get vdc content")
            else:
                content = XmlElementTree.fromstring(response.content)

            filter_dict = {}

            for item in content:
                if item.tag.split('}')[-1] == 'AvailableNetworks':
                    for net in item:
                        response = self.perform_request(req_type='GET',
                                                   url=net.get('href'),
                                                       headers=headers)

                        if response.status_code != 200:
                            self.logger.error("Failed to get network content")
                            raise vimconn.vimconnNotFoundException("Failed to get network content")
                        else:
                            net_details = XmlElementTree.fromstring(response.content)

                            # ids look like 'urn:vcloud:network:<uuid>'
                            vdc_network_id = net_details.get('id').split(":")
                            if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
                                filter_dict["name"] = net_details.get('name')
                                filter_dict["id"] = vdc_network_id[3]
                                if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
                                    shared = True
                                else:
                                    shared = False
                                filter_dict["shared"] = shared
                                filter_dict["tenant_id"] = vdc_id
                                # vCD status '1' means the network is up
                                if int(net_details.get('status')) == 1:
                                    filter_dict["admin_state_up"] = True
                                else:
                                    filter_dict["admin_state_up"] = False
                                filter_dict["status"] = "ACTIVE"
                                filter_dict["type"] = "bridge"
                                self.logger.debug("Returning {}".format(filter_dict))
                                return filter_dict
                    # for/else: runs when the inner loop completes without
                    # returning, i.e. net_id was not among the available networks
                    else:
                        raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
        except Exception as e:
            self.logger.debug("Error in get_network")
            self.logger.debug(traceback.format_exc())
            if isinstance(e, vimconn.vimconnException):
                raise
            else:
                raise vimconn.vimconnNotFoundException("Failed : Network not found {} ".format(e))

        return filter_dict
+
+    def delete_network(self, net_id, created_items=None):
+        """
+        Removes a tenant network from VIM and its associated elements
+        :param net_id: VIM identifier of the network, provided by method new_network
+        :param created_items: dictionary with extra items to be deleted. provided by method new_network
+        Returns the network identifier or raises an exception upon error or when network is not found
+        """
+
+        # ############# Stub code for SRIOV #################
+#         dvport_group = self.get_dvport_group(net_id)
+#         if dvport_group:
+#             #delete portgroup
+#             status = self.destroy_dvport_group(net_id)
+#             if status:
+#                 # Remove vlanID from persistent info
+#                 if net_id in self.persistent_info["used_vlanIDs"]:
+#                     del self.persistent_info["used_vlanIDs"][net_id]
+#
+#                 return net_id
+
+        vcd_network = self.get_vcd_network(network_uuid=net_id)
+        if vcd_network is not None and vcd_network:
+            if self.delete_network_action(network_uuid=net_id):
+                return net_id
+        else:
+            raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
+
+    def refresh_nets_status(self, net_list):
+        """Get the status of the networks
+           Params: the list of network identifiers
+           Returns a dictionary with:
+                net_id:         #VIM id of this network
+                    status:     #Mandatory. Text with one of:
+                                #  DELETED (not found at vim)
+                                #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+                                #  OTHER (Vim reported other status not understood)
+                                #  ERROR (VIM indicates an ERROR status)
+                                #  ACTIVE, INACTIVE, DOWN (admin down),
+                                #  BUILD (on building process)
+                                #
+                    error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
+                    vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
+
+        """
+
+        dict_entry = {}
+        try:
+            for net in net_list:
+                errormsg = ''
+                vcd_network = self.get_vcd_network(network_uuid=net)
+                if vcd_network is not None and vcd_network:
+                    if vcd_network['status'] == '1':
+                        status = 'ACTIVE'
+                    else:
+                        status = 'DOWN'
+                else:
+                    status = 'DELETED'
+                    errormsg = 'Network not found.'
+
+                dict_entry[net] = {'status': status, 'error_msg': errormsg,
+                                   'vim_info': yaml.safe_dump(vcd_network)}
+        except:
+            self.logger.debug("Error in refresh_nets_status")
+            self.logger.debug(traceback.format_exc())
+
+        return dict_entry
+
+    def get_flavor(self, flavor_id):
+        """Obtain flavor details from the  VIM
+            Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } #TODO to concrete
+        """
+        if flavor_id not in vimconnector.flavorlist:
+            raise vimconn.vimconnNotFoundException("Flavor not found.")
+        return vimconnector.flavorlist[flavor_id]
+
+    def new_flavor(self, flavor_data):
+        """Adds a tenant flavor to VIM
+            flavor_data contains a dictionary with information, keys:
+                name: flavor name
+                ram: memory (cloud type) in MBytes
+                vpcus: cpus (cloud type)
+                extended: EPA parameters
+                  - numas: #items requested in same NUMA
+                        memory: number of 1G huge pages memory
+                        paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads
+                        interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
+                          - name: interface name
+                            dedicated: yes|no|yes:sriov;  for PT, SRIOV or only one SRIOV for the physical NIC
+                            bandwidth: X Gbps; requested guarantee bandwidth
+                            vpci: requested virtual PCI address
+                disk: disk size
+                is_public:
+                 #TODO to concrete
+        Returns the flavor identifier"""
+
+        # generate a new uuid put to internal dict and return it.
+        self.logger.debug("Creating new flavor - flavor_data: {}".format(flavor_data))
+        new_flavor=flavor_data
+        ram = flavor_data.get(FLAVOR_RAM_KEY, 1024)
+        cpu = flavor_data.get(FLAVOR_VCPUS_KEY, 1)
+        disk = flavor_data.get(FLAVOR_DISK_KEY, 0)
+
+        if not isinstance(ram, int):
+            raise vimconn.vimconnException("Non-integer value for ram")
+        elif not isinstance(cpu, int):
+            raise vimconn.vimconnException("Non-integer value for cpu")
+        elif not isinstance(disk, int):
+            raise vimconn.vimconnException("Non-integer value for disk")
+
+        extended_flv = flavor_data.get("extended")
+        if extended_flv:
+            numas=extended_flv.get("numas")
+            if numas:
+                for numa in numas:
+                    #overwrite ram and vcpus
+                    if 'memory' in numa:
+                        ram = numa['memory']*1024
+                    if 'paired-threads' in numa:
+                        cpu = numa['paired-threads']*2
+                    elif 'cores' in numa:
+                        cpu = numa['cores']
+                    elif 'threads' in numa:
+                        cpu = numa['threads']
+
+        new_flavor[FLAVOR_RAM_KEY] = ram
+        new_flavor[FLAVOR_VCPUS_KEY] = cpu
+        new_flavor[FLAVOR_DISK_KEY] = disk
+        # generate a new uuid put to internal dict and return it.
+        flavor_id = uuid.uuid4()
+        vimconnector.flavorlist[str(flavor_id)] = new_flavor
+        self.logger.debug("Created flavor - {} : {}".format(flavor_id, new_flavor))
+
+        return str(flavor_id)
+
+    def delete_flavor(self, flavor_id):
+        """Deletes a tenant flavor from VIM identify by its id
+
+           Returns the used id or raise an exception
+        """
+        if flavor_id not in vimconnector.flavorlist:
+            raise vimconn.vimconnNotFoundException("Flavor not found.")
+
+        vimconnector.flavorlist.pop(flavor_id, None)
+        return flavor_id
+
+    def new_image(self, image_dict):
+        """
+        Adds a tenant image to VIM
+        Returns:
+            200, image-id        if the image is created
+            <0, message          if there is an error
+        """
+
+        return self.get_image_id_from_path(image_dict['location'])
+
+    def delete_image(self, image_id):
+        """
+            Deletes a tenant image from VIM
+            Args:
+                image_id is ID of Image to be deleted
+            Return:
+                returns the image identifier in UUID format or raises an exception on error
+        """
+        conn = self.connect_as_admin()
+        if not conn:
+            raise vimconn.vimconnConnectionException("Failed to connect vCD")
+        # Get Catalog details
+        url_list = [self.url, '/api/catalog/', image_id]
+        catalog_herf = ''.join(url_list)
+
+        headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                  'x-vcloud-authorization': conn._session.headers['x-vcloud-authorization']}
+
+        response = self.perform_request(req_type='GET',
+                                        url=catalog_herf,
+                                        headers=headers)
+
+        if response.status_code != requests.codes.ok:
+            self.logger.debug("delete_image():GET REST API call {} failed. "\
+                              "Return status code {}".format(catalog_herf,
+                                                             response.status_code))
+            raise vimconn.vimconnNotFoundException("Fail to get image {}".format(image_id))
+
+        lxmlroot_respond = lxmlElementTree.fromstring(response.content)
+        namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
+        #For python3
+        #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
+        namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
+
+        catalogItems_section = lxmlroot_respond.find("xmlns:CatalogItems",namespaces)
+        catalogItems = catalogItems_section.iterfind("xmlns:CatalogItem",namespaces)
+        for catalogItem in catalogItems:
+            catalogItem_href = catalogItem.attrib['href']
+
+            response = self.perform_request(req_type='GET',
+                                        url=catalogItem_href,
+                                        headers=headers)
+
+            if response.status_code != requests.codes.ok:
+                self.logger.debug("delete_image():GET REST API call {} failed. "\
+                                  "Return status code {}".format(catalog_herf,
+                                                                 response.status_code))
+                raise vimconn.vimconnNotFoundException("Fail to get catalogItem {} for catalog {}".format(
+                                                                                    catalogItem,
+                                                                                    image_id))
+
+            lxmlroot_respond = lxmlElementTree.fromstring(response.content)
+            namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
+            #For python3
+            #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
+            namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
+            catalogitem_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']
+
+            #Remove catalogItem
+            response = self.perform_request(req_type='DELETE',
+                                        url=catalogitem_remove_href,
+                                        headers=headers)
+            if response.status_code == requests.codes.no_content:
+                self.logger.debug("Deleted Catalog item {}".format(catalogItem))
+            else:
+                raise vimconn.vimconnException("Fail to delete Catalog Item {}".format(catalogItem))
+
+        #Remove catalog
+        url_list = [self.url, '/api/admin/catalog/', image_id]
+        catalog_remove_herf = ''.join(url_list)
+        response = self.perform_request(req_type='DELETE',
+                                        url=catalog_remove_herf,
+                                        headers=headers)
+
+        if response.status_code == requests.codes.no_content:
+            self.logger.debug("Deleted Catalog {}".format(image_id))
+            return image_id
+        else:
+            raise vimconn.vimconnException("Fail to delete Catalog {}".format(image_id))
+
+
+    def catalog_exists(self, catalog_name, catalogs):
+        """
+
+        :param catalog_name:
+        :param catalogs:
+        :return:
+        """
+        for catalog in catalogs:
+            if catalog['name'] == catalog_name:
+                return catalog['id']
+
+    def create_vimcatalog(self, vca=None, catalog_name=None):
+        """ Create new catalog entry in vCloud director.
+
+            Args
+                vca:  vCloud director.
+                catalog_name catalog that client wish to create.   Note no validation done for a name.
+                Client must make sure that provide valid string representation.
+
+             Returns catalog id if catalog created else None.
+
+        """
+        try:
+            lxml_catalog_element = vca.create_catalog(catalog_name, catalog_name)
+            if lxml_catalog_element:
+                id_attr_value = lxml_catalog_element.get('id')  # 'urn:vcloud:catalog:7490d561-d384-4dac-8229-3575fd1fc7b4'
+                return id_attr_value.split(':')[-1]
+            catalogs = vca.list_catalogs()
+        except Exception as ex:
+            self.logger.error(
+                'create_vimcatalog(): Creation of catalog "{}" failed with error: {}'.format(catalog_name, ex))
+            raise
+        return self.catalog_exists(catalog_name, catalogs)
+
+    # noinspection PyIncorrectDocstring
+    def upload_ovf(self, vca=None, catalog_name=None, image_name=None, media_file_name=None,
+                   description='', progress=False, chunk_bytes=128 * 1024):
+        """
+        Uploads a OVF file to a vCloud catalog
+
+        :param chunk_bytes:
+        :param progress:
+        :param description:
+        :param image_name:
+        :param vca:
+        :param catalog_name: (str): The name of the catalog to upload the media.
+        :param media_file_name: (str): The name of the local media file to upload.
+        :return: (bool) True if the media file was successfully uploaded, false otherwise.
+        """
+        os.path.isfile(media_file_name)
+        statinfo = os.stat(media_file_name)
+
+        #  find a catalog entry where we upload OVF.
+        #  create vApp Template and check the status if vCD able to read OVF it will respond with appropirate
+        #  status change.
+        #  if VCD can parse OVF we upload VMDK file
+        try:
+            for catalog in vca.list_catalogs():
+                if catalog_name != catalog['name']:
+                    continue
+                catalog_href = "{}/api/catalog/{}/action/upload".format(self.url, catalog['id'])
+                data = """
+                <UploadVAppTemplateParams name="{}" xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"><Description>{} vApp Template</Description></UploadVAppTemplateParams>
+                """.format(catalog_name, description)
+
+                if self.client:
+                    headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+                    headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml'
+
+                response = self.perform_request(req_type='POST',
+                                                url=catalog_href,
+                                                headers=headers,
+                                                data=data)
+
+                if response.status_code == requests.codes.created:
+                    catalogItem = XmlElementTree.fromstring(response.content)
+                    entity = [child for child in catalogItem if
+                              child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
+                    href = entity.get('href')
+                    template = href
+
+                    response = self.perform_request(req_type='GET',
+                                                    url=href,
+                                                    headers=headers)
+
+                    if response.status_code == requests.codes.ok:
+                        headers['Content-Type'] = 'Content-Type text/xml'
+                        result = re.search('rel="upload:default"\shref="(.*?\/descriptor.ovf)"',response.content)
+                        if result:
+                            transfer_href = result.group(1)
+
+                        response = self.perform_request(req_type='PUT',
+                                                    url=transfer_href,
+                                                    headers=headers,
+                                                    data=open(media_file_name, 'rb'))
+                        if response.status_code != requests.codes.ok:
+                            self.logger.debug(
+                                "Failed create vApp template for catalog name {} and image {}".format(catalog_name,
+                                                                                                      media_file_name))
+                            return False
+
+                    # TODO fix this with aync block
+                    time.sleep(5)
+
+                    self.logger.debug("vApp template for catalog name {} and image {}".format(catalog_name, media_file_name))
+
+                    # uploading VMDK file
+                    # check status of OVF upload and upload remaining files.
+                    response = self.perform_request(req_type='GET',
+                                                    url=template,
+                                                    headers=headers)
+
+                    if response.status_code == requests.codes.ok:
+                        result = re.search('rel="upload:default"\s*href="(.*?vmdk)"',response.content)
+                        if result:
+                            link_href = result.group(1)
+                        # we skip ovf since it already uploaded.
+                        if 'ovf' in link_href:
+                            continue
+                        # The OVF file and VMDK must be in a same directory
+                        head, tail = os.path.split(media_file_name)
+                        file_vmdk = head + '/' + link_href.split("/")[-1]
+                        if not os.path.isfile(file_vmdk):
+                            return False
+                        statinfo = os.stat(file_vmdk)
+                        if statinfo.st_size == 0:
+                            return False
+                        hrefvmdk = link_href
+
+                        if progress:
+                            widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ',
+                                           FileTransferSpeed()]
+                            progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start()
+
+                        bytes_transferred = 0
+                        f = open(file_vmdk, 'rb')
+                        while bytes_transferred < statinfo.st_size:
+                            my_bytes = f.read(chunk_bytes)
+                            if len(my_bytes) <= chunk_bytes:
+                                headers['Content-Range'] = 'bytes {}-{}/{}'.format(
+                                    bytes_transferred, len(my_bytes) - 1, statinfo.st_size)
+                                headers['Content-Length'] = str(len(my_bytes))
+                                response = requests.put(url=hrefvmdk,
+                                                         headers=headers,
+                                                         data=my_bytes,
+                                                         verify=False)
+                                if response.status_code == requests.codes.ok:
+                                    bytes_transferred += len(my_bytes)
+                                    if progress:
+                                        progress_bar.update(bytes_transferred)
+                                else:
+                                    self.logger.debug(
+                                        'file upload failed with error: [{}] {}'.format(response.status_code,
+                                                                                        response.content))
+
+                                    f.close()
+                                    return False
+                        f.close()
+                        if progress:
+                            progress_bar.finish()
+                            time.sleep(10)
+                    return True
+                else:
+                    self.logger.debug("Failed retrieve vApp template for catalog name {} for OVF {}".
+                                      format(catalog_name, media_file_name))
+                    return False
+        except Exception as exp:
+            self.logger.debug("Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
+                .format(catalog_name,media_file_name, exp))
+            raise vimconn.vimconnException(
+                "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
+                .format(catalog_name,media_file_name, exp))
+
+        self.logger.debug("Failed retrieve catalog name {} for OVF file {}".format(catalog_name, media_file_name))
+        return False
+
+    def upload_vimimage(self, vca=None, catalog_name=None, media_name=None, medial_file_name=None, progress=False):
+        """Upload media file"""
+        # TODO add named parameters for readability
+
+        return self.upload_ovf(vca=vca, catalog_name=catalog_name, image_name=media_name.split(".")[0],
+                               media_file_name=medial_file_name, description='medial_file_name', progress=progress)
+
+    def validate_uuid4(self, uuid_string=None):
+        """  Method validate correct format of UUID.
+
+        Return: true if string represent valid uuid
+        """
+        try:
+            val = uuid.UUID(uuid_string, version=4)
+        except ValueError:
+            return False
+        return True
+
+    def get_catalogid(self, catalog_name=None, catalogs=None):
+        """  Method check catalog and return catalog ID in UUID format.
+
+        Args
+            catalog_name: catalog name as string
+            catalogs:  list of catalogs.
+
+        Return: catalogs uuid
+        """
+
+        for catalog in catalogs:
+            if catalog['name'] == catalog_name:
+                catalog_id = catalog['id']
+                return catalog_id
+        return None
+
+    def get_catalogbyid(self, catalog_uuid=None, catalogs=None):
+        """  Method check catalog and return catalog name lookup done by catalog UUID.
+
+        Args
+            catalog_name: catalog name as string
+            catalogs:  list of catalogs.
+
+        Return: catalogs name or None
+        """
+
+        if not self.validate_uuid4(uuid_string=catalog_uuid):
+            return None
+
+        for catalog in catalogs:
+            catalog_id = catalog.get('id')
+            if catalog_id == catalog_uuid:
+                return catalog.get('name')
+        return None
+
+    def get_catalog_obj(self, catalog_uuid=None, catalogs=None):
+        """  Method check catalog and return catalog name lookup done by catalog UUID.
+
+        Args
+            catalog_name: catalog name as string
+            catalogs:  list of catalogs.
+
+        Return: catalogs name or None
+        """
+
+        if not self.validate_uuid4(uuid_string=catalog_uuid):
+            return None
+
+        for catalog in catalogs:
+            catalog_id = catalog.get('id')
+            if catalog_id == catalog_uuid:
+                return catalog
+        return None
+
+    def get_image_id_from_path(self, path=None, progress=False):
+        """  Method upload OVF image to vCloud director.
+
+        Each OVF image represented as single catalog entry in vcloud director.
+        The method check for existing catalog entry.  The check done by file name without file extension.
+
+        if given catalog name already present method will respond with existing catalog uuid otherwise
+        it will create new catalog entry and upload OVF file to newly created catalog.
+
+        If method can't create catalog entry or upload a file it will throw exception.
+
+        Method accept boolean flag progress that will output progress bar. It useful method
+        for standalone upload use case. In case to test large file upload.
+
+        Args
+            path: - valid path to OVF file.
+            progress - boolean progress bar show progress bar.
+
+        Return: if image uploaded correct method will provide image catalog UUID.
+        """
+
+        if not path:
+            raise vimconn.vimconnException("Image path can't be None.")
+
+        if not os.path.isfile(path):
+            raise vimconn.vimconnException("Can't read file. File not found.")
+
+        if not os.access(path, os.R_OK):
+            raise vimconn.vimconnException("Can't read file. Check file permission to read.")
+
+        self.logger.debug("get_image_id_from_path() client requesting {} ".format(path))
+
+        dirpath, filename = os.path.split(path)
+        flname, file_extension = os.path.splitext(path)
+        if file_extension != '.ovf':
+            self.logger.debug("Wrong file extension {} connector support only OVF container.".format(file_extension))
+            raise vimconn.vimconnException("Wrong container.  vCloud director supports only OVF.")
+
+        catalog_name = os.path.splitext(filename)[0]
+        catalog_md5_name = hashlib.md5(path).hexdigest()
+        self.logger.debug("File name {} Catalog Name {} file path {} "
+                          "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name))
+
+        try:
+            org,vdc = self.get_vdc_details()
+            catalogs = org.list_catalogs()
+        except Exception as exp:
+            self.logger.debug("Failed get catalogs() with Exception {} ".format(exp))
+            raise vimconn.vimconnException("Failed get catalogs() with Exception {} ".format(exp))
+
+        if len(catalogs) == 0:
+            self.logger.info("Creating a new catalog entry {} in vcloud director".format(catalog_name))
+            if self.create_vimcatalog(org, catalog_md5_name) is None:
+                raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
+
+            result = self.upload_vimimage(vca=org, catalog_name=catalog_md5_name,
+                                          media_name=filename, medial_file_name=path, progress=progress)
+            if not result:
+                raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_name))
+            return self.get_catalogid(catalog_name, catalogs)
+        else:
+            for catalog in catalogs:
+                # search for existing catalog if we find same name we return ID
+                # TODO optimize this
+                if catalog['name'] == catalog_md5_name:
+                    self.logger.debug("Found existing catalog entry for {} "
+                                      "catalog id {}".format(catalog_name,
+                                                             self.get_catalogid(catalog_md5_name, catalogs)))
+                    return self.get_catalogid(catalog_md5_name, catalogs)
+
+        # if we didn't find existing catalog we create a new one and upload image.
+        self.logger.debug("Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name))
+        if self.create_vimcatalog(org, catalog_md5_name) is None:
+            raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
+
+        result = self.upload_vimimage(vca=org, catalog_name=catalog_md5_name,
+                                      media_name=filename, medial_file_name=path, progress=progress)
+        if not result:
+            raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_md5_name))
+
+        return self.get_catalogid(catalog_md5_name, org.list_catalogs())
+
+    def get_image_list(self, filter_dict={}):
+        '''Obtain tenant images from VIM
+        Filter_dict can be:
+            name: image name
+            id: image uuid
+            checksum: image checksum
+            location: image path
+        Returns the image list of dictionaries:
+            [{<the fields at Filter_dict plus some VIM specific>}, ...]
+            List can be empty
+        '''
+
+        try:
+            org, vdc = self.get_vdc_details()
+            image_list = []
+            catalogs = org.list_catalogs()
+            if len(catalogs) == 0:
+                return image_list
+            else:
+                for catalog in catalogs:
+                    catalog_uuid = catalog.get('id')
+                    name = catalog.get('name')
+                    filtered_dict = {}
+                    if filter_dict.get("name") and filter_dict["name"] != name:
+                        continue
+                    if filter_dict.get("id") and filter_dict["id"] != catalog_uuid:
+                        continue
+                    filtered_dict ["name"] = name
+                    filtered_dict ["id"] = catalog_uuid
+                    image_list.append(filtered_dict)
+
+                self.logger.debug("List of already created catalog items: {}".format(image_list))
+                return image_list
+        except Exception as exp:
+            raise vimconn.vimconnException("Exception occured while retriving catalog items {}".format(exp))
+
+    def get_vappid(self, vdc=None, vapp_name=None):
+        """ Method takes vdc object and vApp name and returns vapp uuid or None
+
+        Args:
+            vdc: The VDC object.
+            vapp_name: is application vappp name identifier
+
+        Returns:
+                The return vApp name otherwise None
+        """
+        if vdc is None or vapp_name is None:
+            return None
+        # UUID has following format https://host/api/vApp/vapp-30da58a3-e7c7-4d09-8f68-d4c8201169cf
+        try:
+            refs = filter(lambda ref: ref.name == vapp_name and ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
+                          vdc.ResourceEntities.ResourceEntity)
+            #For python3
+            #refs = [ref for ref in vdc.ResourceEntities.ResourceEntity\
+            #         if ref.name == vapp_name and ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml']
+            if len(refs) == 1:
+                return refs[0].href.split("vapp")[1][1:]
+        except Exception as e:
+            self.logger.exception(e)
+            return False
+        return None
+
+    def check_vapp(self, vdc=None, vapp_uuid=None):
+        """ Method Method returns True or False if vapp deployed in vCloud director
+
+            Args:
+                vca: Connector to VCA
+                vdc: The VDC object.
+                vappid: vappid is application identifier
+
+            Returns:
+                The return True if vApp deployed
+                :param vdc:
+                :param vapp_uuid:
+        """
+        try:
+            refs = filter(lambda ref:
+                          ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
+                          vdc.ResourceEntities.ResourceEntity)
+            #For python3
+            #refs = [ref for ref in vdc.ResourceEntities.ResourceEntity\
+            #         if ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml']
+            for ref in refs:
+                vappid = ref.href.split("vapp")[1][1:]
+                # find vapp with respected vapp uuid
+                if vappid == vapp_uuid:
+                    return True
+        except Exception as e:
+            self.logger.exception(e)
+            return False
+        return False
+
+    def get_namebyvappid(self, vapp_uuid=None):
+        """Method returns vApp name from vCD and lookup done by vapp_id.
+
+        Args:
+            vapp_uuid: vappid is application identifier
+
+        Returns:
+            The return vApp name otherwise None
+        """
+        try:
+            if self.client and vapp_uuid:
+                vapp_call = "{}/api/vApp/vapp-{}".format(self.url, vapp_uuid)
+                headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                     'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+
+                response = self.perform_request(req_type='GET',
+                                                url=vapp_call,
+                                                headers=headers)
+                #Retry login if session expired & retry sending request
+                if response.status_code == 403:
+                    response = self.retry_rest('GET', vapp_call)
+
+                tree = XmlElementTree.fromstring(response.content)
+                return tree.attrib['name']
+        except Exception as e:
+            self.logger.exception(e)
+            return None
+        return None
+
+    def new_vminstance(self, name=None, description="", start=False, image_id=None, flavor_id=None, net_list=[],
+                       cloud_config=None, disk_list=None, availability_zone_index=None, availability_zone_list=None):
+        """Adds a VM instance to VIM
+        Params:
+            'start': (boolean) indicates if VM must start or created in pause mode.
+            'image_id','flavor_id': image and flavor VIM id to use for the VM
+            'net_list': list of interfaces, each one is a dictionary with:
+                'name': (optional) name for the interface.
+                'net_id': VIM network id where this interface must be connect to. Mandatory for type==virtual
+                'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities
+                'model': (optional and only have sense for type==virtual) interface model: virtio, e1000, ...
+                'mac_address': (optional) mac address to assign to this interface
+                #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type if VF and net_id is not provided,
+                    the VLAN tag to be used. In case net_id is provided, the internal network vlan is used for tagging VF
+                'type': (mandatory) can be one of:
+                    'virtual', in this case always connected to a network of type 'net_type=bridge'
+                     'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp network ot it
+                           can created unconnected
+                     'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
+                     'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
+                            are allocated on the same physical NIC
+                'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
+                'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing
+                                or True, it must apply the default VIM behaviour
+                After execution the method will add the key:
+                'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this
+                        interface. 'net_list' is modified
+            'cloud_config': (optional) dictionary with:
+                'key-pairs': (optional) list of strings with the public key to be inserted to the default user
+                'users': (optional) list of users to be inserted, each item is a dict with:
+                    'name': (mandatory) user name,
+                    'key-pairs': (optional) list of strings with the public key to be inserted to the user
+                'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
+                    or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
+                'config-files': (optional). List of files to be transferred. Each item is a dict with:
+                    'dest': (mandatory) string with the destination absolute path
+                    'encoding': (optional, by default text). Can be one of:
+                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
+                    'content' (mandatory): string with the content of the file
+                    'permissions': (optional) string with file permissions, typically octal notation '0644'
+                    'owner': (optional) file owner, string with the format 'owner:group'
+                'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
+            'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
+                'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
+                'size': (mandatory) string with the size of the disk in GB
+            availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
+            availability_zone_list: list of availability zones given by user in the VNFD descriptor.  Ignore if
+                availability_zone_index is None
+        Returns a tuple with the instance identifier and created_items or raises an exception on error
+            created_items can be None or a dictionary where this method can include key-values that will be passed to
+            the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
+            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+            as not present.
+        """
+        self.logger.info("Creating new instance for entry {}".format(name))
+        self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {} "\
+                          "availability_zone_index {} availability_zone_list {}"\
+                          .format(description, start, image_id, flavor_id, net_list, cloud_config, disk_list,\
+                                  availability_zone_index, availability_zone_list))
+
+        #new vm name = vmname + tenant_id + uuid
+        new_vm_name = [name, '-', str(uuid.uuid4())]
+        vmname_andid = ''.join(new_vm_name)
+
+        for net in net_list:
+            if net['type'] == "PCI-PASSTHROUGH":
+                raise vimconn.vimconnNotSupportedException(
+                      "Current vCD version does not support type : {}".format(net['type']))
+
+        if len(net_list) > 10:
+            raise vimconn.vimconnNotSupportedException(
+                      "The VM hardware versions 7 and above support upto 10 NICs only")
+
+        # if vm already deployed we return existing uuid
+        # we check for presence of VDC, Catalog entry and Flavor.
+        org, vdc = self.get_vdc_details()
+        if vdc is None:
+            raise vimconn.vimconnNotFoundException(
+                "new_vminstance(): Failed create vApp {}: (Failed retrieve VDC information)".format(name))
+        catalogs = org.list_catalogs()
+        if catalogs is None:
+            #Retry once, if failed by refreshing token
+            self.get_token()
+            org = Org(self.client, resource=self.client.get_org())
+            catalogs = org.list_catalogs()
+        if catalogs is None:
+            raise vimconn.vimconnNotFoundException(
+                "new_vminstance(): Failed create vApp {}: (Failed retrieve catalogs list)".format(name))
+
+        catalog_hash_name = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
+        if catalog_hash_name:
+            self.logger.info("Found catalog entry {} for image id {}".format(catalog_hash_name, image_id))
+        else:
+            raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
+                                                   "(Failed retrieve catalog information {})".format(name, image_id))
+
+        # Set vCPU and Memory based on flavor.
+        vm_cpus = None
+        vm_memory = None
+        vm_disk = None
+        numas = None
+
+        if flavor_id is not None:
+            if flavor_id not in vimconnector.flavorlist:
+                raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
+                                                       "Failed retrieve flavor information "
+                                                       "flavor id {}".format(name, flavor_id))
+            else:
+                try:
+                    flavor = vimconnector.flavorlist[flavor_id]
+                    vm_cpus = flavor[FLAVOR_VCPUS_KEY]
+                    vm_memory = flavor[FLAVOR_RAM_KEY]
+                    vm_disk = flavor[FLAVOR_DISK_KEY]
+                    extended = flavor.get("extended", None)
+                    if extended:
+                        numas=extended.get("numas", None)
+
+                except Exception as exp:
+                    raise vimconn.vimconnException("Corrupted flavor. {}.Exception: {}".format(flavor_id, exp))
+
+        # image upload creates template name as catalog name space Template.
+        templateName = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
+        power_on = 'false'
+        if start:
+            power_on = 'true'
+
+        # client must provide at least one entry in net_list if not we report error
+        #If net type is mgmt, then configure it as primary net & use its NIC index as primary NIC
+        #If no mgmt, then the 1st NN in netlist is considered as primary net. 
+        primary_net = None
+        primary_netname = None
+        primary_net_href = None
+        network_mode = 'bridged'
+        if net_list is not None and len(net_list) > 0:
+            for net in net_list:
+                if 'use' in net and net['use'] == 'mgmt' and not primary_net:
+                    primary_net = net
+            if primary_net is None:
+                primary_net = net_list[0]
+
+            try:
+                primary_net_id = primary_net['net_id']
+                url_list = [self.url, '/api/network/', primary_net_id]
+                primary_net_href = ''.join(url_list) 
+                network_dict = self.get_vcd_network(network_uuid=primary_net_id)
+                if 'name' in network_dict:
+                    primary_netname = network_dict['name']
+
+            except KeyError:
+                raise vimconn.vimconnException("Corrupted flavor. {}".format(primary_net))
+        else:
+            raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed network list is empty.".format(name))
+
+        # use: 'data', 'bridge', 'mgmt'
+        # create vApp.  Set vcpu and ram based on flavor id.
+        try:
+            vdc_obj = VDC(self.client, resource=org.get_vdc(self.tenant_name))
+            if not vdc_obj:
+                raise vimconn.vimconnNotFoundException("new_vminstance(): Failed to get VDC object")
+
+            for retry in (1,2):
+                items = org.get_catalog_item(catalog_hash_name, catalog_hash_name)
+                catalog_items = [items.attrib]
+
+                if len(catalog_items) == 1:
+                    if self.client:
+                        headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+
+                    response = self.perform_request(req_type='GET',
+                                                url=catalog_items[0].get('href'),
+                                                headers=headers)
+                    catalogItem = XmlElementTree.fromstring(response.content)
+                    entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
+                    vapp_tempalte_href = entity.get("href")
+
+                response = self.perform_request(req_type='GET',
+                                                    url=vapp_tempalte_href,
+                                                    headers=headers)
+                if response.status_code != requests.codes.ok:
+                    self.logger.debug("REST API call {} failed. Return status code {}".format(vapp_tempalte_href,
+                                                                                           response.status_code))
+                else:
+                    result = (response.content).replace("\n"," ")
+
+                vapp_template_tree = XmlElementTree.fromstring(response.content)
+                children_element = [child for child in vapp_template_tree if 'Children' in child.tag][0]
+                vm_element = [child for child in children_element if 'Vm' in child.tag][0]
+                vm_name = vm_element.get('name')
+                vm_id = vm_element.get('id')
+                vm_href = vm_element.get('href')
+
+                cpus = re.search('<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
+                memory_mb = re.search('<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
+                cores = re.search('<vmw:CoresPerSocket ovf:required.*?>(\d+)</vmw:CoresPerSocket>',result).group(1)
+
+                headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml'
+                vdc_id = vdc.get('id').split(':')[-1]
+                instantiate_vapp_href = "{}/api/vdc/{}/action/instantiateVAppTemplate".format(self.url,
+                                                                                                vdc_id)
+                data = """<?xml version="1.0" encoding="UTF-8"?>
+                <InstantiateVAppTemplateParams
+                xmlns="http://www.vmware.com/vcloud/v1.5"
+                name="{}"
+                deploy="false"
+                powerOn="false"
+                xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+                xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1">
+                <Description>Vapp instantiation</Description>
+                <InstantiationParams>
+                     <NetworkConfigSection>
+                         <ovf:Info>Configuration parameters for logical networks</ovf:Info>
+                         <NetworkConfig networkName="{}">
+                             <Configuration>
+                                 <ParentNetwork href="{}" />
+                                 <FenceMode>bridged</FenceMode>
+                             </Configuration>
+                         </NetworkConfig>
+                     </NetworkConfigSection>
+                <LeaseSettingsSection
+                type="application/vnd.vmware.vcloud.leaseSettingsSection+xml">
+                <ovf:Info>Lease Settings</ovf:Info>
+                <StorageLeaseInSeconds>172800</StorageLeaseInSeconds>
+                <StorageLeaseExpiration>2014-04-25T08:08:16.438-07:00</StorageLeaseExpiration>
+                </LeaseSettingsSection>
+                </InstantiationParams>
+                <Source href="{}"/>
+                <SourcedItem>
+                <Source href="{}" id="{}" name="{}"
+                type="application/vnd.vmware.vcloud.vm+xml"/>
+                <VmGeneralParams>
+                    <NeedsCustomization>false</NeedsCustomization>
+                </VmGeneralParams>
+                <InstantiationParams>
+                      <NetworkConnectionSection>
+                      <ovf:Info>Specifies the available VM network connections</ovf:Info>
+                      <NetworkConnection network="{}">
+                      <NetworkConnectionIndex>0</NetworkConnectionIndex>
+                      <IsConnected>true</IsConnected>
+                      <IpAddressAllocationMode>DHCP</IpAddressAllocationMode>
+                      </NetworkConnection>
+                      </NetworkConnectionSection><ovf:VirtualHardwareSection>
+                      <ovf:Info>Virtual hardware requirements</ovf:Info>
+                      <ovf:Item xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
+                      xmlns:vmw="http://www.vmware.com/schema/ovf">
+                      <rasd:AllocationUnits>hertz * 10^6</rasd:AllocationUnits>
+                      <rasd:Description>Number of Virtual CPUs</rasd:Description>
+                      <rasd:ElementName xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="str">{cpu} virtual CPU(s)</rasd:ElementName>
+                      <rasd:InstanceID>4</rasd:InstanceID>
+                      <rasd:Reservation>0</rasd:Reservation>
+                      <rasd:ResourceType>3</rasd:ResourceType>
+                      <rasd:VirtualQuantity xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="int">{cpu}</rasd:VirtualQuantity>
+                      <rasd:Weight>0</rasd:Weight>
+                      <vmw:CoresPerSocket ovf:required="false">{core}</vmw:CoresPerSocket>
+                      </ovf:Item><ovf:Item xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData">
+                      <rasd:AllocationUnits>byte * 2^20</rasd:AllocationUnits>
+                      <rasd:Description>Memory Size</rasd:Description>
+                      <rasd:ElementName xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="str">{memory} MB of memory</rasd:ElementName>
+                      <rasd:InstanceID>5</rasd:InstanceID>
+                      <rasd:Reservation>0</rasd:Reservation>
+                      <rasd:ResourceType>4</rasd:ResourceType>
+                      <rasd:VirtualQuantity xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="int">{memory}</rasd:VirtualQuantity>
+                      <rasd:Weight>0</rasd:Weight>
+                      </ovf:Item>
+                </ovf:VirtualHardwareSection>
+                </InstantiationParams>
+                </SourcedItem>
+                <AllEULAsAccepted>false</AllEULAsAccepted>
+                </InstantiateVAppTemplateParams>""".format(vmname_andid,
+                                                        primary_netname,
+                                                        primary_net_href,
+                                                     vapp_tempalte_href,
+                                                                vm_href,
+                                                                  vm_id,
+                                                                vm_name,
+                                                        primary_netname,
+                                                               cpu=cpus,
+                                                             core=cores,
+                                                       memory=memory_mb)
+
+                response = self.perform_request(req_type='POST',
+                                                url=instantiate_vapp_href,
+                                                headers=headers,
+                                                data=data)
+
+                if response.status_code != 201:
+                    self.logger.error("REST call {} failed reason : {}"\
+                         "status code : {}".format(instantiate_vapp_href,
+                                                        response.content,
+                                                   response.status_code))
+                    raise vimconn.vimconnException("new_vminstance(): Failed to create"\
+                                                        "vAapp {}".format(vmname_andid))
+                else:
+                    vapptask = self.get_task_from_response(response.content)
+
+                if vapptask is None and retry==1:
+                    self.get_token() # Retry getting token
+                    continue
+                else:
+                    break
+
+            if vapptask is None or vapptask is False:
+                raise vimconn.vimconnUnexpectedResponse(
+                    "new_vminstance(): failed to create vApp {}".format(vmname_andid))
+
+            # wait for task to complete
+            result = self.client.get_task_monitor().wait_for_success(task=vapptask)
+
+            if result.get('status') == 'success':
+                self.logger.debug("new_vminstance(): Sucessfully created Vapp {}".format(vmname_andid))
+            else:
+                raise vimconn.vimconnUnexpectedResponse(
+                    "new_vminstance(): failed to create vApp {}".format(vmname_andid))
+
+        except Exception as exp:
+            raise vimconn.vimconnUnexpectedResponse(
+                "new_vminstance(): failed to create vApp {} with Exception:{}".format(vmname_andid, exp))
+
+        # we should have now vapp in undeployed state.
+        try:
+            vdc_obj = VDC(self.client, href=vdc.get('href'))
+            vapp_resource = vdc_obj.get_vapp(vmname_andid)
+            vapp_uuid = vapp_resource.get('id').split(':')[-1]
+            vapp = VApp(self.client, resource=vapp_resource)
+
+        except Exception as exp:
+            raise vimconn.vimconnUnexpectedResponse(
+                    "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
+                    .format(vmname_andid, exp))
+
+        if vapp_uuid is None:
+            raise vimconn.vimconnUnexpectedResponse(
+                "new_vminstance(): Failed to retrieve vApp {} after creation".format(
+                                                                            vmname_andid))
+
+        #Add PCI passthrough/SRIOV configrations
+        vm_obj = None
+        pci_devices_info = []
+        reserve_memory = False
+
+        for net in net_list:
+            if net["type"] == "PF" or net["type"] == "PCI-PASSTHROUGH":
+                pci_devices_info.append(net)
+            elif (net["type"] == "VF" or net["type"] == "SR-IOV" or net["type"] == "VFnotShared") and 'net_id'in net:
+                reserve_memory = True
+
+        #Add PCI
+        if len(pci_devices_info) > 0:
+            self.logger.info("Need to add PCI devices {} into VM {}".format(pci_devices_info,
+                                                                        vmname_andid ))
+            PCI_devices_status, vm_obj, vcenter_conect = self.add_pci_devices(vapp_uuid,
+                                                                            pci_devices_info,
+                                                                            vmname_andid)
+            if PCI_devices_status:
+                self.logger.info("Added PCI devives {} to VM {}".format(
+                                                            pci_devices_info,
+                                                            vmname_andid)
+                                 )
+                reserve_memory = True
+            else:
+                self.logger.info("Fail to add PCI devives {} to VM {}".format(
+                                                            pci_devices_info,
+                                                            vmname_andid)
+                                 )
+
+        # Modify vm disk
+        if vm_disk:
+            #Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled
+            result = self.modify_vm_disk(vapp_uuid, vm_disk)
+            if result :
+                self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))
+
+        #Add new or existing disks to vApp
+        if disk_list:
+            added_existing_disk = False
+            for disk in disk_list:
+                if 'device_type' in disk and disk['device_type'] == 'cdrom':
+                    image_id = disk['image_id']
+                    # Adding CD-ROM to VM
+                    # will revisit code once specification ready to support this feature
+                    self.insert_media_to_vm(vapp, image_id)
+                elif "image_id" in disk and disk["image_id"] is not None:
+                    self.logger.debug("Adding existing disk from image {} to vm {} ".format(
+                                                                    disk["image_id"] , vapp_uuid))
+                    self.add_existing_disk(catalogs=catalogs,
+                                           image_id=disk["image_id"],
+                                           size = disk["size"],
+                                           template_name=templateName,
+                                           vapp_uuid=vapp_uuid
+                                           )
+                    added_existing_disk = True
+                else:
+                    #Wait till added existing disk gets reflected into vCD database/API
+                    if added_existing_disk:
+                        time.sleep(5)
+                        added_existing_disk = False
+                    self.add_new_disk(vapp_uuid, disk['size'])
+
+        if numas:
+            # Assigning numa affinity setting
+            for numa in numas:
+                if 'paired-threads-id' in numa:
+                    paired_threads_id = numa['paired-threads-id']
+                    self.set_numa_affinity(vapp_uuid, paired_threads_id)
+
+        # add NICs & connect to networks in netlist
+        try:
+            vdc_obj = VDC(self.client, href=vdc.get('href'))
+            vapp_resource = vdc_obj.get_vapp(vmname_andid)
+            vapp = VApp(self.client, resource=vapp_resource)
+            vapp_id = vapp_resource.get('id').split(':')[-1]
+
+            self.logger.info("Removing primary NIC: ")
+            # First remove all NICs so that NIC properties can be adjusted as needed
+            self.remove_primary_network_adapter_from_all_vms(vapp)
+
+            self.logger.info("Request to connect VM to a network: {}".format(net_list))
+            primary_nic_index = 0
+            nicIndex = 0
+            for net in net_list:
+                # openmano uses network id in UUID format.
+                # vCloud Director need a name so we do reverse operation from provided UUID we lookup a name
+                # [{'use': 'bridge', 'net_id': '527d4bf7-566a-41e7-a9e7-ca3cdd9cef4f', 'type': 'virtual',
+                #   'vpci': '0000:00:11.0', 'name': 'eth0'}]
+
+                if 'net_id' not in net:
+                    continue
+
+                #Using net_id as a vim_id i.e. vim interface id, as do not have saperate vim interface id
+                #Same will be returned in refresh_vms_status() as vim_interface_id
+                net['vim_id'] = net['net_id']  # Provide the same VIM identifier as the VIM network
+
+                interface_net_id = net['net_id']
+                interface_net_name = self.get_network_name_by_id(network_uuid=interface_net_id)
+                interface_network_mode = net['use']
+
+                if interface_network_mode == 'mgmt':
+                    primary_nic_index = nicIndex
+
+                """- POOL (A static IP address is allocated automatically from a pool of addresses.)
+                                  - DHCP (The IP address is obtained from a DHCP service.)
+                                  - MANUAL (The IP address is assigned manually in the IpAddress element.)
+                                  - NONE (No IP addressing mode specified.)"""
+
+                if primary_netname is not None:
+                    self.logger.debug("new_vminstance(): Filtering by net name {}".format(interface_net_name))
+                    nets = filter(lambda n: n.get('name') == interface_net_name, self.get_network_list())
+                    #For python3
+                    #nets = [n for n in self.get_network_list() if n.get('name') == interface_net_name]
+                    if len(nets) == 1:
+                        self.logger.info("new_vminstance(): Found requested network: {}".format(nets[0].get('name')))
+
+                        if interface_net_name != primary_netname:
+                            # connect network to VM - with all DHCP by default
+                            self.logger.info("new_vminstance(): Attaching net {} to vapp".format(interface_net_name))
+                            self.connect_vapp_to_org_vdc_network(vapp_id, nets[0].get('name'))
+
+                        type_list = ('PF', 'PCI-PASSTHROUGH', 'VFnotShared')
+                        nic_type = 'VMXNET3'
+                        if 'type' in net and net['type'] not in type_list:
+                            # fetching nic type from vnf
+                            if 'model' in net:
+                                if net['model'] is not None:
+                                    if net['model'].lower() == 'paravirt' or net['model'].lower() == 'virtio':
+                                        nic_type = 'VMXNET3'
+                                else:
+                                    nic_type = net['model']
+
+                                self.logger.info("new_vminstance(): adding network adapter "\
+                                                          "to a network {}".format(nets[0].get('name')))
+                                self.add_network_adapter_to_vms(vapp, nets[0].get('name'),
+                                                                primary_nic_index,
+                                                                nicIndex,
+                                                                net,
+                                                                nic_type=nic_type)
+                            else:
+                                self.logger.info("new_vminstance(): adding network adapter "\
+                                                         "to a network {}".format(nets[0].get('name')))
+                                if net['type'] in ['SR-IOV', 'VF']:
+                                    nic_type = net['type']
+                                self.add_network_adapter_to_vms(vapp, nets[0].get('name'),
+                                                                primary_nic_index,
+                                                                nicIndex,
+                                                                net,
+                                                                nic_type=nic_type)
+                nicIndex += 1
+
+            # cloud-init for ssh-key injection
+            if cloud_config:
+                # Create a catalog which will be carrying the config drive ISO
+                # This catalog is deleted during vApp deletion. The catalog name carries
+                # vApp UUID and thats how it gets identified during its deletion.
+                config_drive_catalog_name = 'cfg_drv-' + vapp_uuid
+                self.logger.info('new_vminstance(): Creating catalog "{}" to carry config drive ISO'.format(
+                    config_drive_catalog_name))
+                config_drive_catalog_id = self.create_vimcatalog(org, config_drive_catalog_name)
+                if config_drive_catalog_id is None:
+                    error_msg = "new_vminstance(): Failed to create new catalog '{}' to carry the config drive " \
+                                "ISO".format(config_drive_catalog_name)
+                    raise Exception(error_msg)
+
+                # Create config-drive ISO
+                _, userdata = self._create_user_data(cloud_config)
+                # self.logger.debug('new_vminstance(): The userdata for cloud-init: {}'.format(userdata))
+                iso_path = self.create_config_drive_iso(userdata)
+                self.logger.debug('new_vminstance(): The ISO is successfully created. Path: {}'.format(iso_path))
+
+                self.logger.info('new_vminstance(): uploading iso to catalog {}'.format(config_drive_catalog_name))
+                self.upload_iso_to_catalog(config_drive_catalog_id, iso_path)
+                # Attach the config-drive ISO to the VM
+                self.logger.info('new_vminstance(): Attaching the config-drive ISO to the VM')
+                # The ISO remains in INVALID_STATE right after the PUT request (its a blocking call though)
+                time.sleep(5)
+                self.insert_media_to_vm(vapp, config_drive_catalog_id)
+                shutil.rmtree(os.path.dirname(iso_path), ignore_errors=True)
+
+            # If VM has PCI devices or SRIOV reserve memory for VM
+            if reserve_memory:
+                self.reserve_memory_for_all_vms(vapp, memory_mb)
+
+            self.logger.debug("new_vminstance(): starting power on vApp {} ".format(vmname_andid))
+
+            poweron_task = self.power_on_vapp(vapp_id, vmname_andid)
+            result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
+            if result.get('status') == 'success':
+                self.logger.info("new_vminstance(): Successfully power on "\
+                                             "vApp {}".format(vmname_andid))
+            else:
+                self.logger.error("new_vminstance(): failed to power on vApp "\
+                                                     "{}".format(vmname_andid))
+
+        except Exception as exp:
+            try:
+                self.delete_vminstance(vapp_uuid)
+            except Exception as exp2:
+                self.logger.error("new_vminstance rollback fail {}".format(exp2))
+            # it might be a case if specific mandatory entry in dict is empty or some other pyVcloud exception
+            self.logger.error("new_vminstance(): Failed create new vm instance {} with exception {}"
+                              .format(name, exp))
+            raise vimconn.vimconnException("new_vminstance(): Failed create new vm instance {} with exception {}"
+                                           .format(name, exp))
+
+        # check if vApp deployed and if that the case return vApp UUID otherwise -1
+        wait_time = 0
+        vapp_uuid = None
+        while wait_time <= MAX_WAIT_TIME:
+            try:
+                vapp_resource = vdc_obj.get_vapp(vmname_andid)
+                vapp = VApp(self.client, resource=vapp_resource)
+            except Exception as exp:
+                raise vimconn.vimconnUnexpectedResponse(
+                        "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
+                        .format(vmname_andid, exp))
+
+            #if vapp and vapp.me.deployed:
+            if vapp and vapp_resource.get('deployed') == 'true':
+                vapp_uuid = vapp_resource.get('id').split(':')[-1]
+                break
+            else:
+                self.logger.debug("new_vminstance(): Wait for vApp {} to deploy".format(name))
+                time.sleep(INTERVAL_TIME)
+
+            wait_time +=INTERVAL_TIME
+
+        #SET Affinity Rule for VM
+        #Pre-requisites: User has created Hosh Groups in vCenter with respective Hosts to be used
+        #While creating VIM account user has to pass the Host Group names in availability_zone list
+        #"availability_zone" is a  part of VIM "config" parameters
+        #For example, in VIM config: "availability_zone":["HG_170","HG_174","HG_175"]
+        #Host groups are referred as availability zones
+        #With following procedure, deployed VM will be added into a VM group.
+        #Then A VM to Host Affinity rule will be created using the VM group & Host group.
+        if(availability_zone_list):
+            self.logger.debug("Existing Host Groups in VIM {}".format(self.config.get('availability_zone')))
+            #Admin access required for creating Affinity rules
+            client = self.connect_as_admin()
+            if not client:
+                raise vimconn.vimconnConnectionException("Failed to connect vCD as admin")
+            else:
+                self.client = client
+            if self.client:
+                headers = {'Accept':'application/*+xml;version=27.0',
+                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+            #Step1: Get provider vdc details from organization
+            pvdc_href = self.get_pvdc_for_org(self.tenant_name, headers)
+            if pvdc_href is not None:
+            #Step2: Found required pvdc, now get resource pool information
+                respool_href = self.get_resource_pool_details(pvdc_href, headers)
+                if respool_href is None:
+                    #Raise error if respool_href not found
+                    msg = "new_vminstance():Error in finding resource pool details in pvdc {}"\
+                           .format(pvdc_href)
+                    self.log_message(msg)
+
+            #Step3: Verify requested availability zone(hostGroup) is present in vCD
+            # get availability Zone
+            vm_az = self.get_vm_availability_zone(availability_zone_index, availability_zone_list)
+            # check if provided av zone(hostGroup) is present in vCD VIM
+            status = self.check_availibility_zone(vm_az, respool_href, headers)
+            if status is False:
+                msg = "new_vminstance(): Error in finding availability zone(Host Group): {} in "\
+                       "resource pool {} status: {}".format(vm_az,respool_href,status)
+                self.log_message(msg)
+            else:
+                self.logger.debug ("new_vminstance(): Availability zone {} found in VIM".format(vm_az))
+
+            #Step4: Find VM group references to create vm group
+            vmgrp_href = self.find_vmgroup_reference(respool_href, headers)
+            if vmgrp_href == None:
+                msg = "new_vminstance(): No reference to VmGroup found in resource pool"
+                self.log_message(msg)
+
+            #Step5: Create a VmGroup with name az_VmGroup
+            vmgrp_name = vm_az + "_" + name #Formed VM Group name = Host Group name + VM name
+            status = self.create_vmgroup(vmgrp_name, vmgrp_href, headers)
+            if status is not True:
+                msg = "new_vminstance(): Error in creating VM group {}".format(vmgrp_name)
+                self.log_message(msg)
+
+            #VM Group url to add vms to vm group
+            vmgrpname_url = self.url + "/api/admin/extension/vmGroup/name/"+ vmgrp_name
+
+            #Step6: Add VM to VM Group
+            #Find VM uuid from vapp_uuid
+            vm_details = self.get_vapp_details_rest(vapp_uuid)
+            vm_uuid = vm_details['vmuuid']
+
+            status = self.add_vm_to_vmgroup(vm_uuid, vmgrpname_url, vmgrp_name, headers)
+            if status is not True:
+                msg = "new_vminstance(): Error in adding VM to VM group {}".format(vmgrp_name)
+                self.log_message(msg)
+
+            #Step7: Create VM to Host affinity rule
+            addrule_href = self.get_add_rule_reference (respool_href, headers)
+            if addrule_href is None:
+                msg = "new_vminstance(): Error in finding href to add rule in resource pool: {}"\
+                      .format(respool_href)
+                self.log_message(msg)
+
+            status = self.create_vm_to_host_affinity_rule(addrule_href, vmgrp_name, vm_az, "Affinity",  headers)
+            if status is False:
+                msg = "new_vminstance(): Error in creating affinity rule for VM {} in Host group {}"\
+                      .format(name, vm_az)
+                self.log_message(msg)
+            else:
+                self.logger.debug("new_vminstance(): Affinity rule created successfully. Added {} in Host group {}"\
+                                    .format(name, vm_az))
+            #Reset token to a normal user to perform other operations
+            self.get_token()
+
+        if vapp_uuid is not None:
+            return vapp_uuid, None
+        else:
+            raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed create new vm instance {}".format(name))
+
+    def create_config_drive_iso(self, user_data):
+        tmpdir = tempfile.mkdtemp()
+        iso_path = os.path.join(tmpdir, 'ConfigDrive.iso')
+        latest_dir = os.path.join(tmpdir, 'openstack', 'latest')
+        os.makedirs(latest_dir)
+        with open(os.path.join(latest_dir, 'meta_data.json'), 'w') as meta_file_obj, \
+                open(os.path.join(latest_dir, 'user_data'), 'w') as userdata_file_obj:
+            userdata_file_obj.write(user_data)
+            meta_file_obj.write(json.dumps({"availability_zone": "nova",
+                                            "launch_index": 0,
+                                            "name": "ConfigDrive",
+                                            "uuid": str(uuid.uuid4())}
+                                           )
+                                )
+        genisoimage_cmd = 'genisoimage -J -r -V config-2 -o {iso_path} {source_dir_path}'.format(
+            iso_path=iso_path, source_dir_path=tmpdir)
+        self.logger.info('create_config_drive_iso(): Creating ISO by running command "{}"'.format(genisoimage_cmd))
+        try:
+            FNULL = open(os.devnull, 'w')
+            subprocess.check_call(genisoimage_cmd, shell=True, stdout=FNULL)
+        except subprocess.CalledProcessError as e:
+            shutil.rmtree(tmpdir, ignore_errors=True)
+            error_msg = 'create_config_drive_iso(): Exception while running genisoimage command: {}'.format(e)
+            self.logger.error(error_msg)
+            raise Exception(error_msg)
+        return iso_path
+
+    def upload_iso_to_catalog(self, catalog_id, iso_file_path):
+        if not os.path.isfile(iso_file_path):
+            error_msg = "upload_iso_to_catalog(): Given iso file is not present. Given path: {}".format(iso_file_path)
+            self.logger.error(error_msg)
+            raise Exception(error_msg)
+        iso_file_stat = os.stat(iso_file_path)
+        xml_media_elem = '''<?xml version="1.0" encoding="UTF-8"?>
+                            <Media
+                                xmlns="http://www.vmware.com/vcloud/v1.5"
+                                name="{iso_name}"
+                                size="{iso_size}"
+                                imageType="iso">
+                                <Description>ISO image for config-drive</Description>
+                            </Media>'''.format(iso_name=os.path.basename(iso_file_path), iso_size=iso_file_stat.st_size)
+        headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                   'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+        headers['Content-Type'] = 'application/vnd.vmware.vcloud.media+xml'
+        catalog_href = self.url + '/api/catalog/' + catalog_id + '/action/upload'
+        response = self.perform_request(req_type='POST', url=catalog_href, headers=headers, data=xml_media_elem)
+
+        if response.status_code != 201:
+            error_msg = "upload_iso_to_catalog(): Failed to POST an action/upload request to {}".format(catalog_href)
+            self.logger.error(error_msg)
+            raise Exception(error_msg)
+
+        catalogItem = XmlElementTree.fromstring(response.content)
+        entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.media+xml"][0]
+        entity_href = entity.get('href')
+
+        response = self.perform_request(req_type='GET', url=entity_href, headers=headers)
+        if response.status_code != 200:
+            raise Exception("upload_iso_to_catalog(): Failed to GET entity href {}".format(entity_href))
+
+        match = re.search(r'<Files>\s+?<File.+?href="(.+?)"/>\s+?</File>\s+?</Files>', response.text, re.DOTALL)
+        if match:
+            media_upload_href = match.group(1)
+        else:
+            raise Exception('Could not parse the upload URL for the media file from the last response')
+        upload_iso_task = self.get_task_from_response(response.content)
+        headers['Content-Type'] = 'application/octet-stream'
+        response = self.perform_request(req_type='PUT',
+                                        url=media_upload_href,
+                                        headers=headers,
+                                        data=open(iso_file_path, 'rb'))
+
+        if response.status_code != 200:
+            raise Exception('PUT request to "{}" failed'.format(media_upload_href))
+        result = self.client.get_task_monitor().wait_for_success(task=upload_iso_task)
+        if result.get('status') != 'success':
+            raise Exception('The upload iso task failed with status {}'.format(result.get('status')))
+
+    def get_vcd_availibility_zones(self,respool_href, headers):
+        """ Method to find presence of av zone is VIM resource pool
+
+            Args:
+                respool_href - resource pool href
+                headers - header information
+
+            Returns:
+               vcd_az - list of azone present in vCD
+        """
+        vcd_az = []
+        url=respool_href
+        resp = self.perform_request(req_type='GET',url=respool_href, headers=headers)
+
+        if resp.status_code != requests.codes.ok:
+            self.logger.debug ("REST API call {} failed. Return status code {}".format(url, resp.status_code))
+        else:
+        #Get the href to hostGroups and find provided hostGroup is present in it
+            resp_xml = XmlElementTree.fromstring(resp.content)
+            for child in resp_xml:
+                if 'VMWProviderVdcResourcePool' in child.tag:
+                    for schild in child:
+                        if 'Link' in schild.tag:
+                            if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwHostGroupsType+xml":
+                                hostGroup = schild.attrib.get('href')
+                                hg_resp = self.perform_request(req_type='GET',url=hostGroup, headers=headers)
+                                if hg_resp.status_code != requests.codes.ok:
+                                    self.logger.debug ("REST API call {} failed. Return status code {}".format(hostGroup, hg_resp.status_code))
+                                else:
+                                    hg_resp_xml =  XmlElementTree.fromstring(hg_resp.content)
+                                    for hostGroup in hg_resp_xml:
+                                        if 'HostGroup' in hostGroup.tag:
+                                            #append host group name to the list
+                                            vcd_az.append(hostGroup.attrib.get("name"))
+        return vcd_az
+
+
+    def set_availability_zones(self):
+        """
+        Set vim availability zone
+        """
+
+        vim_availability_zones = None
+        availability_zone = None
+        if 'availability_zone' in self.config:
+            vim_availability_zones = self.config.get('availability_zone')
+        if isinstance(vim_availability_zones, str):
+            availability_zone = [vim_availability_zones]
+        elif isinstance(vim_availability_zones, list):
+            availability_zone = vim_availability_zones
+        else:
+            return availability_zone
+
+        return availability_zone
+
+
+    def get_vm_availability_zone(self, availability_zone_index, availability_zone_list):
+        """
+        Return the availability zone to be used by the created VM.
+        returns: The VIM availability zone to be used or None
+        """
+        if availability_zone_index is None:
+            if not self.config.get('availability_zone'):
+                return None
+            elif isinstance(self.config.get('availability_zone'), str):
+                return self.config['availability_zone']
+            else:
+                return self.config['availability_zone'][0]
+
+        vim_availability_zones = self.availability_zone
+
+        # check if VIM offer enough availability zones describe in the VNFD
+        if vim_availability_zones and len(availability_zone_list) <= len(vim_availability_zones):
+            # check if all the names of NFV AV match VIM AV names
+            match_by_index = False
+            for av in availability_zone_list:
+                if av not in vim_availability_zones:
+                    match_by_index = True
+                    break
+            if match_by_index:
+                self.logger.debug("Required Availability zone or Host Group not found in VIM config")
+                self.logger.debug("Input Availability zone list: {}".format(availability_zone_list))
+                self.logger.debug("VIM configured Availability zones: {}".format(vim_availability_zones))
+                self.logger.debug("VIM Availability zones will be used by index")
+                return vim_availability_zones[availability_zone_index]
+            else:
+                return availability_zone_list[availability_zone_index]
+        else:
+            raise vimconn.vimconnConflictException("No enough availability zones at VIM for this deployment")
+
+
    def create_vm_to_host_affinity_rule(self, addrule_href, vmgrpname, hostgrpname, polarity, headers):
        """ Method to create VM to Host Affinity rule in vCD

        Args:
            addrule_href - href to make a POST request
            vmgrpname - name of the VM group created
            hostgrpname - name of the host group created earlier
            polarity - Affinity or Anti-affinity (default: Affinity)
            headers - headers to make REST call

        Returns:
            True - if the rule is created
            False - if the POST request is not accepted (HTTP status != 202)

        Raises:
            vimconn.vimconnUnexpectedResponse - if no task reference is found
            in the response, or the creation task does not end in 'success'
        """
        task_status = False
        rule_name = polarity + "_" + vmgrpname
        # XML body for the vCD admin extension API; the rule is created
        # enabled and mandatory, with the requested polarity
        payload = """<?xml version="1.0" encoding="UTF-8"?>
                     <vmext:VMWVmHostAffinityRule
                       xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
                       xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
                       type="application/vnd.vmware.admin.vmwVmHostAffinityRule+xml">
                       <vcloud:Name>{}</vcloud:Name>
                       <vcloud:IsEnabled>true</vcloud:IsEnabled>
                       <vcloud:IsMandatory>true</vcloud:IsMandatory>
                       <vcloud:Polarity>{}</vcloud:Polarity>
                       <vmext:HostGroupName>{}</vmext:HostGroupName>
                       <vmext:VmGroupName>{}</vmext:VmGroupName>
                     </vmext:VMWVmHostAffinityRule>""".format(rule_name, polarity, hostgrpname, vmgrpname)

        resp = self.perform_request(req_type='POST',url=addrule_href, headers=headers, data=payload)

        # Rule creation is asynchronous: vCD answers 202 (accepted) with a task
        if resp.status_code != requests.codes.accepted:
            self.logger.debug ("REST API call {} failed. Return status code {}".format(addrule_href, resp.status_code))
            task_status = False
            return task_status
        else:
            affinity_task = self.get_task_from_response(resp.content)
            self.logger.debug ("affinity_task: {}".format(affinity_task))
            if affinity_task is None or affinity_task is False:
                raise vimconn.vimconnUnexpectedResponse("failed to find affinity task")
            # wait for task to complete
            result = self.client.get_task_monitor().wait_for_success(task=affinity_task)
            if result.get('status') == 'success':
                self.logger.debug("Successfully created affinity rule {}".format(rule_name))
                return True
            else:
                raise vimconn.vimconnUnexpectedResponse(
                      "failed to create affinity rule {}".format(rule_name))
+
+
+    def get_add_rule_reference (self, respool_href, headers):
+        """ This method finds href to add vm to host affinity rule to vCD
+
+        Args:
+            respool_href- href to resource pool
+            headers- header information to make REST call
+
+        Returns:
+            None - if no valid href to add rule found or
+            addrule_href - href to add vm to host affinity rule of resource pool
+        """
+        addrule_href = None
+        resp = self.perform_request(req_type='GET',url=respool_href, headers=headers)
+
+        if resp.status_code != requests.codes.ok:
+            self.logger.debug ("REST API call {} failed. Return status code {}".format(respool_href, resp.status_code))
+        else:
+
+            resp_xml = XmlElementTree.fromstring(resp.content)
+            for child in resp_xml:
+                if 'VMWProviderVdcResourcePool' in child.tag:
+                    for schild in child:
+                        if 'Link' in schild.tag:
+                            if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwVmHostAffinityRule+xml" and \
+                                schild.attrib.get('rel') == "add":
+                                addrule_href = schild.attrib.get('href')
+                                break
+
+        return addrule_href
+
+
+    def add_vm_to_vmgroup(self, vm_uuid, vmGroupNameURL, vmGroup_name, headers):
+        """ Method to add deployed VM to newly created VM Group.
+            This is required to create VM to Host affinity in vCD
+
+        Args:
+            vm_uuid- newly created vm uuid
+            vmGroupNameURL- URL to VM Group name
+            vmGroup_name- Name of VM group created
+            headers- Headers for REST request
+
+        Returns:
+            True- if VM added to VM group successfully
+            False- if any error encounter
+        """
+
+        addvm_resp = self.perform_request(req_type='GET',url=vmGroupNameURL, headers=headers)#, data=payload)
+
+        if addvm_resp.status_code != requests.codes.ok:
+            self.logger.debug ("REST API call to get VM Group Name url {} failed. Return status code {}"\
+                               .format(vmGroupNameURL, addvm_resp.status_code))
+            return False
+        else:
+            resp_xml = XmlElementTree.fromstring(addvm_resp.content)
+            for child in resp_xml:
+                if child.tag.split('}')[1] == 'Link':
+                    if child.attrib.get("rel") == "addVms":
+                        addvmtogrpURL =  child.attrib.get("href")
+
+        #Get vm details
+        url_list = [self.url, '/api/vApp/vm-',vm_uuid]
+        vmdetailsURL = ''.join(url_list)
+
+        resp = self.perform_request(req_type='GET',url=vmdetailsURL, headers=headers)
+
+        if resp.status_code != requests.codes.ok:
+            self.logger.debug ("REST API call {} failed. Return status code {}".format(vmdetailsURL, resp.status_code))
+            return False
+
+        #Parse VM details
+        resp_xml = XmlElementTree.fromstring(resp.content)
+        if resp_xml.tag.split('}')[1] == "Vm":
+            vm_id = resp_xml.attrib.get("id")
+            vm_name = resp_xml.attrib.get("name")
+            vm_href = resp_xml.attrib.get("href")
+            #print vm_id, vm_name, vm_href
+        #Add VM into VMgroup
+        payload = """<?xml version="1.0" encoding="UTF-8"?>\
+                   <ns2:Vms xmlns:ns2="http://www.vmware.com/vcloud/v1.5" \
+                    xmlns="http://www.vmware.com/vcloud/versions" \
+                    xmlns:ns3="http://schemas.dmtf.org/ovf/envelope/1" \
+                    xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" \
+                    xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/common" \
+                    xmlns:ns6="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" \
+                    xmlns:ns7="http://www.vmware.com/schema/ovf" \
+                    xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" \
+                    xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">\
+                    <ns2:VmReference href="{}" id="{}" name="{}" \
+                    type="application/vnd.vmware.vcloud.vm+xml" />\
+                   </ns2:Vms>""".format(vm_href, vm_id, vm_name)
+
+        addvmtogrp_resp = self.perform_request(req_type='POST',url=addvmtogrpURL, headers=headers, data=payload)
+
+        if addvmtogrp_resp.status_code != requests.codes.accepted:
+            self.logger.debug ("REST API call {} failed. Return status code {}".format(addvmtogrpURL, addvmtogrp_resp.status_code))
+            return False
+        else:
+            self.logger.debug ("Done adding VM {} to VMgroup {}".format(vm_name, vmGroup_name))
+            return True
+
+
+    def create_vmgroup(self, vmgroup_name, vmgroup_href, headers):
+        """Method to create a VM group in vCD
+
+           Args:
+              vmgroup_name : Name of VM group to be created
+              vmgroup_href : href for vmgroup
+              headers- Headers for REST request
+        """
+        #POST to add URL with required data
+        vmgroup_status = False
+        payload = """<VMWVmGroup xmlns="http://www.vmware.com/vcloud/extension/v1.5" \
+                       xmlns:vcloud_v1.5="http://www.vmware.com/vcloud/v1.5" name="{}">\
+                   <vmCount>1</vmCount>\
+                   </VMWVmGroup>""".format(vmgroup_name)
+        resp = self.perform_request(req_type='POST',url=vmgroup_href, headers=headers, data=payload)
+
+        if resp.status_code != requests.codes.accepted:
+            self.logger.debug ("REST API call {} failed. Return status code {}".format(vmgroup_href, resp.status_code))
+            return vmgroup_status
+        else:
+            vmgroup_task = self.get_task_from_response(resp.content)
+            if vmgroup_task is None or vmgroup_task is False:
+                raise vimconn.vimconnUnexpectedResponse(
+                    "create_vmgroup(): failed to create VM group {}".format(vmgroup_name))
+
+            # wait for task to complete
+            result = self.client.get_task_monitor().wait_for_success(task=vmgroup_task)
+
+            if result.get('status') == 'success':
+                self.logger.debug("create_vmgroup(): Successfully created VM group {}".format(vmgroup_name))
+                #time.sleep(10)
+                vmgroup_status = True
+                return vmgroup_status
+            else:
+                raise vimconn.vimconnUnexpectedResponse(\
+                        "create_vmgroup(): failed to create VM group {}".format(vmgroup_name))
+
+
+    def find_vmgroup_reference(self, url, headers):
+        """ Method to create a new VMGroup which is required to add created VM
+            Args:
+               url- resource pool href
+               headers- header information
+
+            Returns:
+               returns href to VM group to create VM group
+        """
+        #Perform GET on resource pool to find 'add' link to create VMGroup
+        #https://vcd-ip/api/admin/extension/providervdc/<providervdc id>/resourcePools
+        vmgrp_href = None
+        resp = self.perform_request(req_type='GET',url=url, headers=headers)
+
+        if resp.status_code != requests.codes.ok:
+            self.logger.debug ("REST API call {} failed. Return status code {}".format(url, resp.status_code))
+        else:
+            #Get the href to add vmGroup to vCD
+            resp_xml = XmlElementTree.fromstring(resp.content)
+            for child in resp_xml:
+                if 'VMWProviderVdcResourcePool' in child.tag:
+                    for schild in child:
+                        if 'Link' in schild.tag:
+                            #Find href with type VMGroup and rel with add
+                            if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwVmGroupType+xml"\
+                                and schild.attrib.get('rel') == "add":
+                                vmgrp_href = schild.attrib.get('href')
+                                return vmgrp_href
+
+
    def check_availibility_zone(self, az, respool_href, headers):
        """ Method to verify requested av zone is present or not in provided
            resource pool

            Args:
                az - name of hostgroup (availibility_zone)
                respool_href - Resource Pool href
                headers - Headers to make REST call
            Returns:
                az_found - True if availibility_zone is found else False

            NOTE: mutates the caller's ``headers`` dict in place by setting
            the 'Accept' key to API version 27.0.
        """
        az_found = False
        # Host-group queries need API version 27.0; this overwrites the
        # 'Accept' entry in the caller-supplied headers dict
        headers['Accept']='application/*+xml;version=27.0'
        resp = self.perform_request(req_type='GET',url=respool_href, headers=headers)

        if resp.status_code != requests.codes.ok:
            self.logger.debug ("REST API call {} failed. Return status code {}".format(respool_href, resp.status_code))
        else:
            # Get the href to hostGroups and check the provided hostGroup is present in it
            resp_xml = XmlElementTree.fromstring(resp.content)

            for child in resp_xml:
                if 'VMWProviderVdcResourcePool' in child.tag:
                    for schild in child:
                        if 'Link' in schild.tag:
                            if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwHostGroupsType+xml":
                                hostGroup_href = schild.attrib.get('href')
                                hg_resp = self.perform_request(req_type='GET',url=hostGroup_href, headers=headers)
                                if hg_resp.status_code != requests.codes.ok:
                                    self.logger.debug ("REST API call {} failed. Return status code {}".format(hostGroup_href, hg_resp.status_code))
                                else:
                                    hg_resp_xml = XmlElementTree.fromstring(hg_resp.content)
                                    for hostGroup in hg_resp_xml:
                                        if 'HostGroup' in hostGroup.tag:
                                            if hostGroup.attrib.get("name") == az:
                                                az_found = True
                                                # break exits only this innermost loop;
                                                # az_found stays True for the final return
                                                break
        return az_found
+
+
    def get_pvdc_for_org(self, org_vdc, headers):
        """ This method gets provider vdc references from organisation

            Args:
               org_vdc - name of the organisation VDC to find pvdc
               headers - headers to make REST call

            Returns:
               None - if no pvdc href found (implicit return), else
               pvdc_href - href to the pvdc whose vdcReferences contain org_vdc

            Raises:
               vimconn.vimconnException - when a pvdc or vdcReferences GET fails
        """

        #Get provider VDC references from vCD
        pvdc_href = None
        #url = '<vcd url>/api/admin/extension/providerVdcReferences'
        url_list = [self.url, '/api/admin/extension/providerVdcReferences']
        url = ''.join(url_list)

        response = self.perform_request(req_type='GET',url=url, headers=headers)
        if response.status_code != requests.codes.ok:
            self.logger.debug ("REST API call {} failed. Return status code {}"\
                               .format(url, response.status_code))
        else:
            xmlroot_response = XmlElementTree.fromstring(response.content)
            # Iterate every provider VDC reference; NOTE the inner loops below
            # reuse the name 'child' for their own elements
            for child in xmlroot_response:
                if 'ProviderVdcReference' in child.tag:
                    pvdc_href = child.attrib.get('href')
                    #Get vdcReferences to find org
                    pvdc_resp = self.perform_request(req_type='GET',url=pvdc_href, headers=headers)
                    if pvdc_resp.status_code != requests.codes.ok:
                        raise vimconn.vimconnException("REST API call {} failed. "\
                                                       "Return status code {}"\
                                                       .format(url, pvdc_resp.status_code))

                    pvdc_resp_xml = XmlElementTree.fromstring(pvdc_resp.content)
                    for child in pvdc_resp_xml:
                        if 'Link' in child.tag:
                            if child.attrib.get('type') == "application/vnd.vmware.admin.vdcReferences+xml":
                                vdc_href = child.attrib.get('href')

                                #Check if provided org is present in vdc
                                vdc_resp = self.perform_request(req_type='GET',
                                                                url=vdc_href,
                                                                headers=headers)
                                if vdc_resp.status_code != requests.codes.ok:
                                    raise vimconn.vimconnException("REST API call {} failed. "\
                                                                   "Return status code {}"\
                                                                   .format(url, vdc_resp.status_code))
                                vdc_resp_xml = XmlElementTree.fromstring(vdc_resp.content)
                                for child in vdc_resp_xml:
                                    if 'VdcReference' in child.tag:
                                        if child.attrib.get('name') == org_vdc:
                                            # First pvdc containing the org VDC wins
                                            return pvdc_href
+
+
+    def get_resource_pool_details(self, pvdc_href, headers):
+        """ Method to get resource pool information.
+            Host groups are property of resource group.
+            To get host groups, we need to GET details of resource pool.
+
+            Args:
+                pvdc_href: href to pvdc details
+                headers: headers
+
+            Returns:
+                respool_href - Returns href link reference to resource pool
+        """
+        respool_href = None
+        resp = self.perform_request(req_type='GET',url=pvdc_href, headers=headers)
+
+        if resp.status_code != requests.codes.ok:
+            self.logger.debug ("REST API call {} failed. Return status code {}"\
+                               .format(pvdc_href, resp.status_code))
+        else:
+            respool_resp_xml = XmlElementTree.fromstring(resp.content)
+            for child in respool_resp_xml:
+                if 'Link' in child.tag:
+                    if child.attrib.get('type') == "application/vnd.vmware.admin.vmwProviderVdcResourcePoolSet+xml":
+                        respool_href = child.attrib.get("href")
+                        break
+        return respool_href
+
+
+    def log_message(self, msg):
+        """
+            Method to log error messages related to Affinity rule creation
+            in new_vminstance & raise Exception
+                Args :
+                    msg - Error message to be logged
+
+        """
+        #get token to connect vCD as a normal user
+        self.get_token()
+        self.logger.debug(msg)
+        raise vimconn.vimconnException(msg)
+
+
+    ##
+    ##
+    ##  based on current discussion
+    ##
+    ##
+    ##  server:
+    #   created: '2016-09-08T11:51:58'
+    #   description: simple-instance.linux1.1
+    #   flavor: ddc6776e-75a9-11e6-ad5f-0800273e724c
+    #   hostId: e836c036-74e7-11e6-b249-0800273e724c
+    #   image: dde30fe6-75a9-11e6-ad5f-0800273e724c
+    #   status: ACTIVE
+    #   error_msg:
+    #   interfaces: …
+    #
+    def get_vminstance(self, vim_vm_uuid=None):
+        """Returns the VM instance information from VIM"""
+
+        self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
+
+        org, vdc = self.get_vdc_details()
+        if vdc is None:
+            raise vimconn.vimconnConnectionException(
+                "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
+
+        vm_info_dict = self.get_vapp_details_rest(vapp_uuid=vim_vm_uuid)
+        if not vm_info_dict:
+            self.logger.debug("get_vminstance(): Failed to get vApp name by UUID {}".format(vim_vm_uuid))
+            raise vimconn.vimconnNotFoundException("Failed to get vApp name by UUID {}".format(vim_vm_uuid))
+
+        status_key = vm_info_dict['status']
+        error = ''
+        try:
+            vm_dict = {'created': vm_info_dict['created'],
+                       'description': vm_info_dict['name'],
+                       'status': vcdStatusCode2manoFormat[int(status_key)],
+                       'hostId': vm_info_dict['vmuuid'],
+                       'error_msg': error,
+                       'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
+
+            if 'interfaces' in vm_info_dict:
+                vm_dict['interfaces'] = vm_info_dict['interfaces']
+            else:
+                vm_dict['interfaces'] = []
+        except KeyError:
+            vm_dict = {'created': '',
+                       'description': '',
+                       'status': vcdStatusCode2manoFormat[int(-1)],
+                       'hostId': vm_info_dict['vmuuid'],
+                       'error_msg': "Inconsistency state",
+                       'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
+
+        return vm_dict
+
+    def delete_vminstance(self, vm__vim_uuid, created_items=None):
+        """Method poweroff and remove VM instance from vcloud director network.
+
+        Args:
+            vm__vim_uuid: VM UUID
+
+        Returns:
+            Returns the instance identifier
+        """
+
+        self.logger.debug("Client requesting delete vm instance {} ".format(vm__vim_uuid))
+
+        org, vdc = self.get_vdc_details()
+        vdc_obj = VDC(self.client, href=vdc.get('href'))
+        if vdc_obj is None:
+            self.logger.debug("delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
+                self.tenant_name))
+            raise vimconn.vimconnException(
+                "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
+
+        try:
+            vapp_name = self.get_namebyvappid(vm__vim_uuid)
+            if vapp_name is None:
+                self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
+                return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
+            self.logger.info("Deleting vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
+            vapp_resource = vdc_obj.get_vapp(vapp_name)
+            vapp = VApp(self.client, resource=vapp_resource)
+
+            # Delete vApp and wait for status change if task executed and vApp is None.
+
+            if vapp:
+                if vapp_resource.get('deployed') == 'true':
+                    self.logger.info("Powering off vApp {}".format(vapp_name))
+                    #Power off vApp
+                    powered_off = False
+                    wait_time = 0
+                    while wait_time <= MAX_WAIT_TIME:
+                        power_off_task = vapp.power_off()
+                        result = self.client.get_task_monitor().wait_for_success(task=power_off_task)
+
+                        if result.get('status') == 'success':
+                            powered_off = True
+                            break
+                        else:
+                            self.logger.info("Wait for vApp {} to power off".format(vapp_name))
+                            time.sleep(INTERVAL_TIME)
+
+                        wait_time +=INTERVAL_TIME
+                    if not powered_off:
+                        self.logger.debug("delete_vminstance(): Failed to power off VM instance {} ".format(vm__vim_uuid))
+                    else:
+                        self.logger.info("delete_vminstance(): Powered off VM instance {} ".format(vm__vim_uuid))
+
+                    #Undeploy vApp
+                    self.logger.info("Undeploy vApp {}".format(vapp_name))
+                    wait_time = 0
+                    undeployed = False
+                    while wait_time <= MAX_WAIT_TIME:
+                        vapp = VApp(self.client, resource=vapp_resource)
+                        if not vapp:
+                            self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
+                            return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
+                        undeploy_task = vapp.undeploy()
+
+                        result = self.client.get_task_monitor().wait_for_success(task=undeploy_task)
+                        if result.get('status') == 'success':
+                            undeployed = True
+                            break
+                        else:
+                            self.logger.debug("Wait for vApp {} to undeploy".format(vapp_name))
+                            time.sleep(INTERVAL_TIME)
+
+                        wait_time +=INTERVAL_TIME
+
+                    if not undeployed:
+                        self.logger.debug("delete_vminstance(): Failed to undeploy vApp {} ".format(vm__vim_uuid))
+
+                # delete vapp
+                self.logger.info("Start deletion of vApp {} ".format(vapp_name))
+
+                if vapp is not None:
+                    wait_time = 0
+                    result = False
+
+                    while wait_time <= MAX_WAIT_TIME:
+                        vapp = VApp(self.client, resource=vapp_resource)
+                        if not vapp:
+                            self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
+                            return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
+
+                        delete_task = vdc_obj.delete_vapp(vapp.name, force=True)
+
+                        result = self.client.get_task_monitor().wait_for_success(task=delete_task)
+                        if result.get('status') == 'success':
+                            break
+                        else:
+                            self.logger.debug("Wait for vApp {} to delete".format(vapp_name))
+                            time.sleep(INTERVAL_TIME)
+
+                        wait_time +=INTERVAL_TIME
+
+                    if result is None:
+                        self.logger.debug("delete_vminstance(): Failed delete uuid {} ".format(vm__vim_uuid))
+                    else:
+                        self.logger.info("Deleted vm instance {} sccessfully".format(vm__vim_uuid))
+                        config_drive_catalog_name, config_drive_catalog_id = 'cfg_drv-' + vm__vim_uuid, None
+                        catalog_list = self.get_image_list()
+                        try:
+                            config_drive_catalog_id = [catalog_['id'] for catalog_ in catalog_list
+                                                       if catalog_['name'] == config_drive_catalog_name][0]
+                        except IndexError:
+                            pass
+                        if config_drive_catalog_id:
+                            self.logger.debug('delete_vminstance(): Found a config drive catalog {} matching '
+                                              'vapp_name"{}". Deleting it.'.format(config_drive_catalog_id, vapp_name))
+                            self.delete_image(config_drive_catalog_id)
+                        return vm__vim_uuid
+        except:
+            self.logger.debug(traceback.format_exc())
+            raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
+
+
+    def refresh_vms_status(self, vm_list):
+        """Get the status of the virtual machines and their interfaces/ports
+           Params: the list of VM identifiers
+           Returns a dictionary with:
+                vm_id:          #VIM id of this Virtual Machine
+                    status:     #Mandatory. Text with one of:
+                                #  DELETED (not found at vim)
+                                #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+                                #  OTHER (Vim reported other status not understood)
+                                #  ERROR (VIM indicates an ERROR status)
+                                #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
+                                #  CREATING (on building process), ERROR
+                                #  ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
+                                #
+                    error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
+                    vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
+                    interfaces:
+                     -  vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
+                        mac_address:      #Text format XX:XX:XX:XX:XX:XX
+                        vim_net_id:       #network id where this interface is connected
+                        vim_interface_id: #interface/port VIM id
+                        ip_address:       #null, or text with IPv4, IPv6 address
+        """
+
+        self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))
+
+        org,vdc = self.get_vdc_details()
+        if vdc is None:
+            raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
+
+        vms_dict = {}
+        nsx_edge_list = []
+        for vmuuid in vm_list:
+            vapp_name = self.get_namebyvappid(vmuuid)
+            if vapp_name is not None:
+
+                try:
+                    vm_pci_details = self.get_vm_pci_details(vmuuid)
+                    vdc_obj = VDC(self.client, href=vdc.get('href'))
+                    vapp_resource = vdc_obj.get_vapp(vapp_name)
+                    the_vapp = VApp(self.client, resource=vapp_resource)
+
+                    vm_details = {}
+                    for vm in the_vapp.get_all_vms():
+                        headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+                        response = self.perform_request(req_type='GET',
+                                                        url=vm.get('href'),
+                                                        headers=headers)
+
+                        if response.status_code != 200:
+                            self.logger.error("refresh_vms_status : REST call {} failed reason : {}"\
+                                                            "status code : {}".format(vm.get('href'),
+                                                                                    response.content,
+                                                                               response.status_code))
+                            raise vimconn.vimconnException("refresh_vms_status : Failed to get "\
+                                                                         "VM details")
+                        xmlroot = XmlElementTree.fromstring(response.content)
+
+                        
+                        result = response.content.replace("\n"," ")
+                        hdd_match = re.search('vcloud:capacity="(\d+)"\svcloud:storageProfileOverrideVmDefault=',result)
+                        if hdd_match:
+                            hdd_mb = hdd_match.group(1)
+                            vm_details['hdd_mb'] = int(hdd_mb) if hdd_mb else None
+                        cpus_match = re.search('<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>',result)
+                        if cpus_match:
+                            cpus = cpus_match.group(1)
+                            vm_details['cpus'] = int(cpus) if cpus else None
+                        memory_mb = re.search('<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
+                        vm_details['memory_mb'] = int(memory_mb) if memory_mb else None
+                        vm_details['status'] = vcdStatusCode2manoFormat[int(xmlroot.get('status'))]
+                        vm_details['id'] = xmlroot.get('id')
+                        vm_details['name'] = xmlroot.get('name')
+                        vm_info = [vm_details]
+                        if vm_pci_details:
+                            vm_info[0].update(vm_pci_details)
+
+                        vm_dict = {'status': vcdStatusCode2manoFormat[int(vapp_resource.get('status'))],
+                                   'error_msg': vcdStatusCode2manoFormat[int(vapp_resource.get('status'))],
+                                   'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}
+
+                        # get networks
+                        vm_ip = None
+                        vm_mac = None
+                        networks = re.findall('<NetworkConnection needsCustomization=.*?</NetworkConnection>',result)
+                        for network in networks:
+                            mac_s = re.search('<MACAddress>(.*?)</MACAddress>',network)
+                            vm_mac = mac_s.group(1) if mac_s else None
+                            ip_s = re.search('<IpAddress>(.*?)</IpAddress>',network)
+                            vm_ip = ip_s.group(1) if ip_s else None
+
+                            if vm_ip is None:
+                                if not nsx_edge_list:
+                                    nsx_edge_list = self.get_edge_details()
+                                    if nsx_edge_list is None:
+                                        raise vimconn.vimconnException("refresh_vms_status:"\
+                                                                       "Failed to get edge details from NSX Manager")
+                                if vm_mac is not None:
+                                    vm_ip = self.get_ipaddr_from_NSXedge(nsx_edge_list, vm_mac)
+
+                            net_s = re.search('network="(.*?)"',network)
+                            network_name = net_s.group(1) if net_s else None
+
+                            vm_net_id = self.get_network_id_by_name(network_name)
+                            interface = {"mac_address": vm_mac,
+                                         "vim_net_id": vm_net_id,
+                                         "vim_interface_id": vm_net_id,
+                                         "ip_address": vm_ip}
+
+                            vm_dict["interfaces"].append(interface)
+
+                    # add a vm to vm dict
+                    vms_dict.setdefault(vmuuid, vm_dict)
+                    self.logger.debug("refresh_vms_status : vm info {}".format(vm_dict))
+                except Exception as exp:
+                    self.logger.debug("Error in response {}".format(exp))
+                    self.logger.debug(traceback.format_exc())
+
+        return vms_dict
+
+
+    def get_edge_details(self):
+        """Get the NSX edge list from NSX Manager
+           Returns list of NSX edge id strings on success; None when the REST
+           call to NSX Manager fails; raises vimconnException when the response
+           contains no edges or cannot be processed.
+        """
+        edge_list = []
+        rheaders = {'Content-Type': 'application/xml'}
+        nsx_api_url = '/api/4.0/edges'
+
+        self.logger.debug("Get edge details from NSX Manager {} {}".format(self.nsx_manager, nsx_api_url))
+
+        try:
+            resp = requests.get(self.nsx_manager + nsx_api_url,
+                                auth = (self.nsx_user, self.nsx_password),
+                                verify = False, headers = rheaders)
+            if resp.status_code == requests.codes.ok:
+                # response is a paged edge list; walk each edgePage child
+                paged_Edge_List = XmlElementTree.fromstring(resp.text)
+                for edge_pages in paged_Edge_List:
+                    if edge_pages.tag == 'edgePage':
+                        for edge_summary in edge_pages:
+                            if edge_summary.tag == 'pagingInfo':
+                                for element in edge_summary:
+                                    # a totalCount of 0 means NSX Manager knows no edges at all
+                                    if element.tag == 'totalCount' and element.text == '0':
+                                        raise vimconn.vimconnException("get_edge_details: No NSX edges details found: {}"
+                                                                       .format(self.nsx_manager))
+
+                            if edge_summary.tag == 'edgeSummary':
+                                for element in edge_summary:
+                                    # only the edge id is collected; other summary fields are ignored
+                                    if element.tag == 'id':
+                                        edge_list.append(element.text)
+                    else:
+                        # NOTE(review): any non-edgePage child aborts the whole scan —
+                        # presumably the paged list only contains edgePage children; confirm
+                        raise vimconn.vimconnException("get_edge_details: No NSX edge details found: {}"
+                                                       .format(self.nsx_manager))
+
+                if not edge_list:
+                    raise vimconn.vimconnException("get_edge_details: "\
+                                                   "No NSX edge details found: {}"
+                                                   .format(self.nsx_manager))
+                else:
+                    self.logger.debug("get_edge_details: Found NSX edges {}".format(edge_list))
+                    return edge_list
+            else:
+                self.logger.debug("get_edge_details: "
+                                  "Failed to get NSX edge details from NSX Manager: {}"
+                                  .format(resp.content))
+                return None
+
+        except Exception as exp:
+            # re-wraps any failure (including the vimconnExceptions raised above)
+            # into a vimconnException carrying the original message
+            self.logger.debug("get_edge_details: "\
+                              "Failed to get NSX edge details from NSX Manager: {}"
+                              .format(exp))
+            raise vimconn.vimconnException("get_edge_details: "\
+                                           "Failed to get NSX edge details from NSX Manager: {}"
+                                           .format(exp))
+
+
+    def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
+        """Get IP address details from NSX edges, using the MAC address
+           PARAMS: nsx_edges : List of NSX edges
+                   mac_address : Find IP address corresponding to this MAC address
+           Returns: IP address corrresponding to the provided MAC address
+        """
+
+        ip_addr = None
+        rheaders = {'Content-Type': 'application/xml'}
+
+        self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
+
+        try:
+            for edge in nsx_edges:
+                nsx_api_url = '/api/4.0/edges/'+ edge +'/dhcp/leaseInfo'
+
+                resp = requests.get(self.nsx_manager + nsx_api_url,
+                                    auth = (self.nsx_user, self.nsx_password),
+                                    verify = False, headers = rheaders)
+
+                if resp.status_code == requests.codes.ok:
+                    dhcp_leases = XmlElementTree.fromstring(resp.text)
+                    for child in dhcp_leases:
+                        if child.tag == 'dhcpLeaseInfo':
+                            dhcpLeaseInfo = child
+                            for leaseInfo in dhcpLeaseInfo:
+                                for elem in leaseInfo:
+                                    if (elem.tag)=='macAddress':
+                                        edge_mac_addr = elem.text
+                                    if (elem.tag)=='ipAddress':
+                                        ip_addr = elem.text
+                                if edge_mac_addr is not None:
+                                    if edge_mac_addr == mac_address:
+                                        self.logger.debug("Found ip addr {} for mac {} at NSX edge {}"
+                                                          .format(ip_addr, mac_address,edge))
+                                        return ip_addr
+                else:
+                    self.logger.debug("get_ipaddr_from_NSXedge: "\
+                                      "Error occurred while getting DHCP lease info from NSX Manager: {}"
+                                      .format(resp.content))
+
+            self.logger.debug("get_ipaddr_from_NSXedge: No IP addr found in any NSX edge")
+            return None
+
+        except XmlElementTree.ParseError as Err:
+            self.logger.debug("ParseError in response from NSX Manager {}".format(Err.message), exc_info=True)
+
+
+    def action_vminstance(self, vm__vim_uuid=None, action_dict=None, created_items={}):
+        """Send and action over a VM instance from VIM
+        Returns the vm_id if the action was successfully sent to the VIM"""
+
+        self.logger.debug("Received action for vm {} and action dict {}".format(vm__vim_uuid, action_dict))
+        if vm__vim_uuid is None or action_dict is None:
+            raise vimconn.vimconnException("Invalid request. VM id or action is None.")
+
+        org, vdc = self.get_vdc_details()
+        if vdc is None:
+            raise  vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
+
+        vapp_name = self.get_namebyvappid(vm__vim_uuid)
+        if vapp_name is None:
+            self.logger.debug("action_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
+            raise vimconn.vimconnException("Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
+        else:
+            self.logger.info("Action_vminstance vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
+
+        try:
+            vdc_obj = VDC(self.client, href=vdc.get('href'))
+            vapp_resource = vdc_obj.get_vapp(vapp_name)
+            vapp = VApp(self.client, resource=vapp_resource)
+            if "start" in action_dict:
+                self.logger.info("action_vminstance: Power on vApp: {}".format(vapp_name))
+                poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)
+                result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
+                self.instance_actions_result("start", result, vapp_name)
+            elif "rebuild" in action_dict:
+                self.logger.info("action_vminstance: Rebuild vApp: {}".format(vapp_name))
+                rebuild_task = vapp.deploy(power_on=True)
+                result = self.client.get_task_monitor().wait_for_success(task=rebuild_task)
+                self.instance_actions_result("rebuild", result, vapp_name)
+            elif "pause" in action_dict:
+                self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
+                pause_task = vapp.undeploy(action='suspend')
+                result = self.client.get_task_monitor().wait_for_success(task=pause_task)
+                self.instance_actions_result("pause", result, vapp_name)
+            elif "resume" in action_dict:
+                self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
+                poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)
+                result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
+                self.instance_actions_result("resume", result, vapp_name)
+            elif "shutoff" in action_dict or "shutdown" in action_dict:
+                action_name , value = action_dict.items()[0]
+                #For python3
+                #action_name , value = list(action_dict.items())[0]
+                self.logger.info("action_vminstance: {} vApp: {}".format(action_name, vapp_name))
+                shutdown_task = vapp.shutdown()
+                result = self.client.get_task_monitor().wait_for_success(task=shutdown_task)
+                if action_name == "shutdown":
+                    self.instance_actions_result("shutdown", result, vapp_name)
+                else:
+                    self.instance_actions_result("shutoff", result, vapp_name)
+            elif "forceOff" in action_dict:
+                result = vapp.undeploy(action='powerOff')
+                self.instance_actions_result("forceOff", result, vapp_name)
+            elif "reboot" in action_dict:
+                self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
+                reboot_task = vapp.reboot()
+                self.client.get_task_monitor().wait_for_success(task=reboot_task)
+            else:
+                raise vimconn.vimconnException("action_vminstance: Invalid action {} or action is None.".format(action_dict))
+            return vm__vim_uuid
+        except Exception as exp :
+            self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
+            raise vimconn.vimconnException("action_vminstance: Failed with Exception {}".format(exp))
+
+    def instance_actions_result(self, action, result, vapp_name):
+        if result.get('status') == 'success':
+            self.logger.info("action_vminstance: Sucessfully {} the vApp: {}".format(action, vapp_name))
+        else:
+            self.logger.error("action_vminstance: Failed to {} vApp: {}".format(action, vapp_name))
+
+    def get_vminstance_console(self, vm_id, console_type="novnc"):
+        """
+        Get a console for the virtual machine
+        Params:
+            vm_id: uuid of the VM
+            console_type, can be:
+                "novnc" (by default), "xvpvnc" for VNC types,
+                "rdp-html5" for RDP types, "spice-html5" for SPICE types
+        Returns dict with the console parameters:
+                protocol: ssh, ftp, http, https, ...
+                server:   usually ip address
+                port:     the http, ssh, ... port
+                suffix:   extra text, e.g. the http path and query string
+        """
+        console_dict = {}
+
+        if console_type==None or console_type=='novnc':
+
+            url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireMksTicket".format(self.url, vm_id)
+
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                       'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+            response = self.perform_request(req_type='POST',
+                                         url=url_rest_call,
+                                           headers=headers)
+
+            if response.status_code == 403:
+                response = self.retry_rest('GET', url_rest_call)
+
+            if response.status_code != 200:
+                self.logger.error("REST call {} failed reason : {}"\
+                                  "status code : {}".format(url_rest_call,
+                                                         response.content,
+                                                    response.status_code))
+                raise vimconn.vimconnException("get_vminstance_console : Failed to get "\
+                                                                     "VM Mks ticket details")
+            s = re.search("<Host>(.*?)</Host>",response.content)
+            console_dict['server'] = s.group(1) if s else None
+            s1 = re.search("<Port>(\d+)</Port>",response.content)
+            console_dict['port'] = s1.group(1) if s1 else None
+
+
+            url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireTicket".format(self.url, vm_id)
+
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                       'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+            response = self.perform_request(req_type='POST',
+                                         url=url_rest_call,
+                                           headers=headers)
+
+            if response.status_code == 403:
+                response = self.retry_rest('GET', url_rest_call)
+
+            if response.status_code != 200:
+                self.logger.error("REST call {} failed reason : {}"\
+                                  "status code : {}".format(url_rest_call,
+                                                         response.content,
+                                                    response.status_code))
+                raise vimconn.vimconnException("get_vminstance_console : Failed to get "\
+                                                                     "VM console details")
+            s = re.search(">.*?/(vm-\d+.*)</",response.content)
+            console_dict['suffix'] = s.group(1) if s else None
+            console_dict['protocol'] = "https"
+
+        return console_dict
+
+    # NOT USED METHODS in current version
+
+    def host_vim2gui(self, host, server_dict):
+        """Transform host dictionary from VIM format to GUI format,
+        and append to the server_dict
+        """
+        raise vimconn.vimconnNotImplemented("Should have implemented this")
+
+    def get_hosts_info(self):
+        """Get the information of deployed hosts
+        Returns the hosts content"""
+        raise vimconn.vimconnNotImplemented("Should have implemented this")
+
+    def get_hosts(self, vim_tenant):
+        """Get the hosts and deployed instances
+        Returns the hosts content"""
+        raise vimconn.vimconnNotImplemented("Should have implemented this")
+
+    def get_processor_rankings(self):
+        """Get the processor rankings in the VIM database"""
+        raise vimconn.vimconnNotImplemented("Should have implemented this")
+
+    def new_host(self, host_data):
+        """Adds a new host to VIM"""
+        '''Returns status code of the VIM response'''
+        raise vimconn.vimconnNotImplemented("Should have implemented this")
+
+    def new_external_port(self, port_data):
+        """Adds a external port to VIM"""
+        '''Returns the port identifier'''
+        raise vimconn.vimconnNotImplemented("Should have implemented this")
+
+    def new_external_network(self, net_name, net_type):
+        """Adds a external network to VIM (shared)"""
+        '''Returns the network identifier'''
+        raise vimconn.vimconnNotImplemented("Should have implemented this")
+
+    def connect_port_network(self, port_id, network_id, admin=False):
+        """Connects a external port to a network"""
+        '''Returns status code of the VIM response'''
+        raise vimconn.vimconnNotImplemented("Should have implemented this")
+
+    def new_vminstancefromJSON(self, vm_data):
+        """Adds a VM instance to VIM"""
+        '''Returns the instance identifier'''
+        raise vimconn.vimconnNotImplemented("Should have implemented this")
+
+    def get_network_name_by_id(self, network_uuid=None):
+        """Method gets vcloud director network named based on supplied uuid.
+
+        Args:
+            network_uuid: network_id
+
+        Returns:
+            The return network name.
+        """
+
+        if not network_uuid:
+            return None
+
+        try:
+            org_dict = self.get_org(self.org_uuid)
+            if 'networks' in org_dict:
+                org_network_dict = org_dict['networks']
+                for net_uuid in org_network_dict:
+                    if net_uuid == network_uuid:
+                        return org_network_dict[net_uuid]
+        except:
+            self.logger.debug("Exception in get_network_name_by_id")
+            self.logger.debug(traceback.format_exc())
+
+        return None
+
+    def get_network_id_by_name(self, network_name=None):
+        """Method gets vcloud director network uuid based on supplied name.
+
+        Args:
+            network_name: network_name
+        Returns:
+            The return network uuid.
+            network_uuid: network_id
+        """
+
+        if not network_name:
+            self.logger.debug("get_network_id_by_name() : Network name is empty")
+            return None
+
+        try:
+            org_dict = self.get_org(self.org_uuid)
+            if org_dict and 'networks' in org_dict:
+                org_network_dict = org_dict['networks']
+                for net_uuid,net_name in org_network_dict.items():
+                #For python3
+                #for net_uuid,net_name in org_network_dict.items():
+                    if net_name == network_name:
+                        return net_uuid
+
+        except KeyError as exp:
+            self.logger.debug("get_network_id_by_name() : KeyError- {} ".format(exp))
+
+        return None
+
+    def list_org_action(self):
+        """
+        Method leverages vCloud director and query for available organization for particular user
+
+        Args:
+            vca - is active VCA connection.
+            vdc_name - is a vdc name that will be used to query vms action
+
+            Returns:
+                The return XML respond
+        """
+        url_list = [self.url, '/api/org']
+        vm_list_rest_call = ''.join(url_list)
+
+        if self.client._session:
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                       'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+
+            response = self.perform_request(req_type='GET',
+                                     url=vm_list_rest_call,
+                                           headers=headers)
+
+            if response.status_code == 403:
+                response = self.retry_rest('GET', vm_list_rest_call)
+
+            if response.status_code == requests.codes.ok:
+                return response.content
+
+        return None
+
+    def get_org_action(self, org_uuid=None):
+        """
+        Method leverages vCloud director and retrieve available object for organization.
+
+        Args:
+            org_uuid - vCD organization uuid
+            self.client - is active connection.
+
+            Returns:
+                The return XML respond
+        """
+
+        if org_uuid is None:
+            return None
+
+        url_list = [self.url, '/api/org/', org_uuid]
+        vm_list_rest_call = ''.join(url_list)
+
+        if self.client._session:
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                     'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+
+            #response = requests.get(vm_list_rest_call, headers=headers, verify=False)
+            response = self.perform_request(req_type='GET',
+                                            url=vm_list_rest_call,
+                                            headers=headers)
+            if response.status_code == 403:
+                response = self.retry_rest('GET', vm_list_rest_call)
+
+            if response.status_code == requests.codes.ok:
+                return response.content
+        return None
+
+    def get_org(self, org_uuid=None):
+        """
+        Method retrieves available organization in vCloud Director
+
+        Args:
+            org_uuid - is a organization uuid.
+
+            Returns:
+                The return dictionary with following key
+                    "network" - for network list under the org
+                    "catalogs" - for network list under the org
+                    "vdcs" - for vdc list under org
+        """
+
+        org_dict = {}
+
+        if org_uuid is None:
+            return org_dict
+
+        content = self.get_org_action(org_uuid=org_uuid)
+        try:
+            vdc_list = {}
+            network_list = {}
+            catalog_list = {}
+            vm_list_xmlroot = XmlElementTree.fromstring(content)
+            for child in vm_list_xmlroot:
+                if child.attrib['type'] == 'application/vnd.vmware.vcloud.vdc+xml':
+                    vdc_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
+                    org_dict['vdcs'] = vdc_list
+                if child.attrib['type'] == 'application/vnd.vmware.vcloud.orgNetwork+xml':
+                    network_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
+                    org_dict['networks'] = network_list
+                if child.attrib['type'] == 'application/vnd.vmware.vcloud.catalog+xml':
+                    catalog_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
+                    org_dict['catalogs'] = catalog_list
+        except:
+            pass
+
+        return org_dict
+
+    def get_org_list(self):
+        """
+        Method retrieves available organization in vCloud Director
+
+        Args:
+            vca - is active VCA connection.
+
+            Returns:
+                The return dictionary and key for each entry VDC UUID
+        """
+
+        org_dict = {}
+
+        content = self.list_org_action()
+        try:
+            vm_list_xmlroot = XmlElementTree.fromstring(content)
+            for vm_xml in vm_list_xmlroot:
+                if vm_xml.tag.split("}")[1] == 'Org':
+                    org_uuid = vm_xml.attrib['href'].split('/')[-1:]
+                    org_dict[org_uuid[0]] = vm_xml.attrib['name']
+        except:
+            pass
+
+        return org_dict
+
+    def vms_view_action(self, vdc_name=None):
+        """ Method leverages vCloud director vms query call
+
+        Args:
+            vca - is active VCA connection.
+            vdc_name - is a vdc name that will be used to query vms action
+
+            Returns:
+                The return XML respond
+        """
+        vca = self.connect()
+        if vdc_name is None:
+            return None
+
+        url_list = [vca.host, '/api/vms/query']
+        vm_list_rest_call = ''.join(url_list)
+
+        if not (not vca.vcloud_session or not vca.vcloud_session.organization):
+            refs = filter(lambda ref: ref.name == vdc_name and ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml',
+                          vca.vcloud_session.organization.Link)
+            #For python3
+            #refs = [ref for ref in vca.vcloud_session.organization.Link if ref.name == vdc_name and\
+            #        ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml']
+            if len(refs) == 1:
+                response = Http.get(url=vm_list_rest_call,
+                                    headers=vca.vcloud_session.get_vcloud_headers(),
+                                    verify=vca.verify,
+                                    logger=vca.logger)
+                if response.status_code == requests.codes.ok:
+                    return response.content
+
+        return None
+
+    def get_vapp_list(self, vdc_name=None):
+        """
+        Method retrieves vApp list deployed vCloud director and returns a dictionary
+        contains a list of all vapp deployed for queried VDC.
+        The key for a dictionary is vApp UUID
+
+
+        Args:
+            vca - is active VCA connection.
+            vdc_name - is a vdc name that will be used to query vms action
+
+            Returns:
+                The return dictionary and key for each entry vapp UUID
+        """
+
+        vapp_dict = {}
+        if vdc_name is None:
+            return vapp_dict
+
+        content = self.vms_view_action(vdc_name=vdc_name)
+        try:
+            vm_list_xmlroot = XmlElementTree.fromstring(content)
+            for vm_xml in vm_list_xmlroot:
+                if vm_xml.tag.split("}")[1] == 'VMRecord':
+                    if vm_xml.attrib['isVAppTemplate'] == 'true':
+                        rawuuid = vm_xml.attrib['container'].split('/')[-1:]
+                        if 'vappTemplate-' in rawuuid[0]:
+                            # vm in format vappTemplate-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
+                            # vm and use raw UUID as key
+                            vapp_dict[rawuuid[0][13:]] = vm_xml.attrib
+        except:
+            pass
+
+        return vapp_dict
+
+    def get_vm_list(self, vdc_name=None):
+        """
+        Method retrieves VM's list deployed vCloud director. It returns a dictionary
+        contains a list of all VM's deployed for queried VDC.
+        The key for a dictionary is VM UUID
+
+
+        Args:
+            vca - is active VCA connection.
+            vdc_name - is a vdc name that will be used to query vms action
+
+            Returns:
+                The return dictionary and key for each entry vapp UUID
+        """
+        vm_dict = {}
+
+        if vdc_name is None:
+            return vm_dict
+
+        content = self.vms_view_action(vdc_name=vdc_name)
+        try:
+            vm_list_xmlroot = XmlElementTree.fromstring(content)
+            for vm_xml in vm_list_xmlroot:
+                if vm_xml.tag.split("}")[1] == 'VMRecord':
+                    if vm_xml.attrib['isVAppTemplate'] == 'false':
+                        rawuuid = vm_xml.attrib['href'].split('/')[-1:]
+                        if 'vm-' in rawuuid[0]:
+                            # vm in format vm-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
+                            #  vm and use raw UUID as key
+                            vm_dict[rawuuid[0][3:]] = vm_xml.attrib
+        except:
+            pass
+
+        return vm_dict
+
+    def get_vapp(self, vdc_name=None, vapp_name=None, isuuid=False):
+        """
+        Method retrieves VM deployed vCloud director. It returns VM attribute as dictionary
+        contains a list of all VM's deployed for queried VDC.
+        The key for a dictionary is VM UUID
+
+
+        Args:
+            vca - is active VCA connection.
+            vdc_name - is a vdc name that will be used to query vms action
+
+            Returns:
+                The return dictionary and key for each entry vapp UUID
+        """
+        vm_dict = {}
+        vca = self.connect()
+        if not vca:
+            raise vimconn.vimconnConnectionException("self.connect() is failed")
+
+        if vdc_name is None:
+            return vm_dict
+
+        content = self.vms_view_action(vdc_name=vdc_name)
+        try:
+            vm_list_xmlroot = XmlElementTree.fromstring(content)
+            for vm_xml in vm_list_xmlroot:
+                if vm_xml.tag.split("}")[1] == 'VMRecord' and vm_xml.attrib['isVAppTemplate'] == 'false':
+                    # lookup done by UUID
+                    if isuuid:
+                        if vapp_name in vm_xml.attrib['container']:
+                            rawuuid = vm_xml.attrib['href'].split('/')[-1:]
+                            if 'vm-' in rawuuid[0]:
+                                vm_dict[rawuuid[0][3:]] = vm_xml.attrib
+                                break
+                    # lookup done by Name
+                    else:
+                        if vapp_name in vm_xml.attrib['name']:
+                            rawuuid = vm_xml.attrib['href'].split('/')[-1:]
+                            if 'vm-' in rawuuid[0]:
+                                vm_dict[rawuuid[0][3:]] = vm_xml.attrib
+                                break
+        except:
+            pass
+
+        return vm_dict
+
+    def get_network_action(self, network_uuid=None):
+        """
+        Method leverages vCloud director and query network based on network uuid
+
+        Args:
+            vca - is active VCA connection.
+            network_uuid - is a network uuid
+
+            Returns:
+                The return XML respond
+        """
+
+        if network_uuid is None:
+            return None
+
+        url_list = [self.url, '/api/network/', network_uuid]
+        vm_list_rest_call = ''.join(url_list)
+
+        if self.client._session:
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                     'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+
+            response = self.perform_request(req_type='GET',
+                                            url=vm_list_rest_call,
+                                            headers=headers)
+            #Retry login if session expired & retry sending request
+            if response.status_code == 403:
+                response = self.retry_rest('GET', vm_list_rest_call)
+
+            if response.status_code == requests.codes.ok:
+                return response.content
+
+        return None
+
+    def get_vcd_network(self, network_uuid=None):
+        """
+        Method retrieves available network from vCloud Director
+
+        Args:
+            network_uuid - is VCD network UUID
+
+        Each element serialized as key : value pair
+
+        Following keys available for access.    network_configuration['Gateway'}
+        <Configuration>
+          <IpScopes>
+            <IpScope>
+                <IsInherited>true</IsInherited>
+                <Gateway>172.16.252.100</Gateway>
+                <Netmask>255.255.255.0</Netmask>
+                <Dns1>172.16.254.201</Dns1>
+                <Dns2>172.16.254.202</Dns2>
+                <DnsSuffix>vmwarelab.edu</DnsSuffix>
+                <IsEnabled>true</IsEnabled>
+                <IpRanges>
+                    <IpRange>
+                        <StartAddress>172.16.252.1</StartAddress>
+                        <EndAddress>172.16.252.99</EndAddress>
+                    </IpRange>
+                </IpRanges>
+            </IpScope>
+        </IpScopes>
+        <FenceMode>bridged</FenceMode>
+
+        Returns:
+                The return dictionary and key for each entry vapp UUID
+        """
+
+        network_configuration = {}
+        if network_uuid is None:
+            return network_uuid
+
+        try:
+            content = self.get_network_action(network_uuid=network_uuid)
+            vm_list_xmlroot = XmlElementTree.fromstring(content)
+
+            network_configuration['status'] = vm_list_xmlroot.get("status")
+            network_configuration['name'] = vm_list_xmlroot.get("name")
+            network_configuration['uuid'] = vm_list_xmlroot.get("id").split(":")[3]
+
+            for child in vm_list_xmlroot:
+                if child.tag.split("}")[1] == 'IsShared':
+                    network_configuration['isShared'] = child.text.strip()
+                if child.tag.split("}")[1] == 'Configuration':
+                    for configuration in child.iter():
+                        tagKey = configuration.tag.split("}")[1].strip()
+                        if tagKey != "":
+                            network_configuration[tagKey] = configuration.text.strip()
+            return network_configuration
+        except Exception as exp :
+            self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp))
+            raise vimconn.vimconnException("get_vcd_network: Failed with Exception {}".format(exp))
+
+        return network_configuration
+
+    def delete_network_action(self, network_uuid=None):
+        """
+        Method delete given network from vCloud director
+
+        Args:
+            network_uuid - is a network uuid that client wish to delete
+
+            Returns:
+                The return None or XML respond or false
+        """
+        client = self.connect_as_admin()
+        if not client:
+            raise vimconn.vimconnConnectionException("Failed to connect vCD as admin")
+        if network_uuid is None:
+            return False
+
+        url_list = [self.url, '/api/admin/network/', network_uuid]
+        vm_list_rest_call = ''.join(url_list)
+
+        if client._session:
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                     'x-vcloud-authorization': client._session.headers['x-vcloud-authorization']}
+            response = self.perform_request(req_type='DELETE',
+                                            url=vm_list_rest_call,
+                                            headers=headers)
+            if response.status_code == 202:
+                return True
+
+        return False
+
+    def create_network(self, network_name=None, net_type='bridge', parent_network_uuid=None,
+                       ip_profile=None, isshared='true'):
+        """
+        Method create network in vCloud director
+
+        Args:
+            network_name - is network name to be created.
+            net_type - can be 'bridge','data','ptp','mgmt'.
+            ip_profile is a dict containing the IP parameters of the network
+            isshared - is a boolean
+            parent_network_uuid - is parent provider vdc network that will be used for mapping.
+            It optional attribute. by default if no parent network indicate the first available will be used.
+
+            Returns:
+                The return network uuid or return None
+        """
+
+        new_network_name = [network_name, '-', str(uuid.uuid4())]
+        content = self.create_network_rest(network_name=''.join(new_network_name),
+                                           ip_profile=ip_profile,
+                                           net_type=net_type,
+                                           parent_network_uuid=parent_network_uuid,
+                                           isshared=isshared)
+        if content is None:
+            self.logger.debug("Failed create network {}.".format(network_name))
+            return None
+
+        try:
+            vm_list_xmlroot = XmlElementTree.fromstring(content)
+            vcd_uuid = vm_list_xmlroot.get('id').split(":")
+            if len(vcd_uuid) == 4:
+                self.logger.info("Created new network name: {} uuid: {}".format(network_name, vcd_uuid[3]))
+                return vcd_uuid[3]
+        except:
+            self.logger.debug("Failed create network {}".format(network_name))
+            return None
+
+    def create_network_rest(self, network_name=None, net_type='bridge', parent_network_uuid=None,
+                            ip_profile=None, isshared='true'):
+        """
+        Method create network in vCloud director
+
+        Args:
+            network_name - is network name to be created.
+            net_type - can be 'bridge','data','ptp','mgmt'.
+            ip_profile is a dict containing the IP parameters of the network
+            isshared - is a boolean
+            parent_network_uuid - is parent provider vdc network that will be used for mapping.
+            It optional attribute. by default if no parent network indicate the first available will be used.
+
+            Returns:
+                The return network uuid or return None
+        """
+        client_as_admin = self.connect_as_admin()
+        if not client_as_admin:
+            raise vimconn.vimconnConnectionException("Failed to connect vCD.")
+        if network_name is None:
+            return None
+
+        url_list = [self.url, '/api/admin/vdc/', self.tenant_id]
+        vm_list_rest_call = ''.join(url_list)
+
+        if client_as_admin._session:
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                     'x-vcloud-authorization': client_as_admin._session.headers['x-vcloud-authorization']}
+
+            response = self.perform_request(req_type='GET',
+                                            url=vm_list_rest_call,
+                                            headers=headers)
+
+            provider_network = None
+            available_networks = None
+            add_vdc_rest_url = None
+
+            if response.status_code != requests.codes.ok:
+                self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
+                                                                                          response.status_code))
+                return None
+            else:
+                try:
+                    vm_list_xmlroot = XmlElementTree.fromstring(response.content)
+                    for child in vm_list_xmlroot:
+                        if child.tag.split("}")[1] == 'ProviderVdcReference':
+                            provider_network = child.attrib.get('href')
+                            # application/vnd.vmware.admin.providervdc+xml
+                        if child.tag.split("}")[1] == 'Link':
+                            if child.attrib.get('type') == 'application/vnd.vmware.vcloud.orgVdcNetwork+xml' \
+                                    and child.attrib.get('rel') == 'add':
+                                add_vdc_rest_url = child.attrib.get('href')
+                except:
+                    self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
+                    self.logger.debug("Respond body {}".format(response.content))
+                    return None
+
+            # find  pvdc provided available network
+            response = self.perform_request(req_type='GET',
+                                            url=provider_network,
+                                            headers=headers)
+            if response.status_code != requests.codes.ok:
+                self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
+                                                                                          response.status_code))
+                return None
+
+            if parent_network_uuid is None:
+                try:
+                    vm_list_xmlroot = XmlElementTree.fromstring(response.content)
+                    for child in vm_list_xmlroot.iter():
+                        if child.tag.split("}")[1] == 'AvailableNetworks':
+                            for networks in child.iter():
+                                # application/vnd.vmware.admin.network+xml
+                                if networks.attrib.get('href') is not None:
+                                    available_networks = networks.attrib.get('href')
+                                    break
+                except:
+                    return None
+
+            try:
+                #Configure IP profile of the network
+                ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
+
+                if 'subnet_address' not in ip_profile or ip_profile['subnet_address'] is None:
+                    subnet_rand = random.randint(0, 255)
+                    ip_base = "192.168.{}.".format(subnet_rand)
+                    ip_profile['subnet_address'] = ip_base + "0/24"
+                else:
+                    ip_base = ip_profile['subnet_address'].rsplit('.',1)[0] + '.'
+
+                if 'gateway_address' not in ip_profile or ip_profile['gateway_address'] is None:
+                    ip_profile['gateway_address']=ip_base + "1"
+                if 'dhcp_count' not in ip_profile or ip_profile['dhcp_count'] is None:
+                    ip_profile['dhcp_count']=DEFAULT_IP_PROFILE['dhcp_count']
+                if 'dhcp_enabled' not in ip_profile or ip_profile['dhcp_enabled'] is None:
+                    ip_profile['dhcp_enabled']=DEFAULT_IP_PROFILE['dhcp_enabled']
+                if 'dhcp_start_address' not in ip_profile or ip_profile['dhcp_start_address'] is None:
+                    ip_profile['dhcp_start_address']=ip_base + "3"
+                if 'ip_version' not in ip_profile or ip_profile['ip_version'] is None:
+                    ip_profile['ip_version']=DEFAULT_IP_PROFILE['ip_version']
+                if 'dns_address' not in ip_profile or ip_profile['dns_address'] is None:
+                    ip_profile['dns_address']=ip_base + "2"
+
+                gateway_address=ip_profile['gateway_address']
+                dhcp_count=int(ip_profile['dhcp_count'])
+                subnet_address=self.convert_cidr_to_netmask(ip_profile['subnet_address'])
+
+                if ip_profile['dhcp_enabled']==True:
+                    dhcp_enabled='true'
+                else:
+                    dhcp_enabled='false'
+                dhcp_start_address=ip_profile['dhcp_start_address']
+
+                #derive dhcp_end_address from dhcp_start_address & dhcp_count
+                end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
+                end_ip_int += dhcp_count - 1
+                dhcp_end_address = str(netaddr.IPAddress(end_ip_int))
+
+                ip_version=ip_profile['ip_version']
+                dns_address=ip_profile['dns_address']
+            except KeyError as exp:
+                self.logger.debug("Create Network REST: Key error {}".format(exp))
+                raise vimconn.vimconnException("Create Network REST: Key error{}".format(exp))
+
+            # either use client provided UUID or search for a first available
+            #  if both are not defined we return none
+            if parent_network_uuid is not None:
+                provider_network = None
+                available_networks = None
+                add_vdc_rest_url = None
+
+                url_list = [self.url, '/api/admin/vdc/', self.tenant_id, '/networks']
+                add_vdc_rest_url = ''.join(url_list)
+
+                url_list = [self.url, '/api/admin/network/', parent_network_uuid]
+                available_networks = ''.join(url_list)
+
+            #Creating all networks as Direct Org VDC type networks.
+            #Unused in case of Underlay (data/ptp) network interface.
+            fence_mode="isolated"
+            is_inherited='false'
+            dns_list = dns_address.split(";")
+            dns1 = dns_list[0]
+            dns2_text = ""
+            if len(dns_list) >= 2:
+                dns2_text = "\n                                                <Dns2>{}</Dns2>\n".format(dns_list[1])
+            data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
+                            <Description>Openmano created</Description>
+                                    <Configuration>
+                                        <IpScopes>
+                                            <IpScope>
+                                                <IsInherited>{1:s}</IsInherited>
+                                                <Gateway>{2:s}</Gateway>
+                                                <Netmask>{3:s}</Netmask>
+                                                <Dns1>{4:s}</Dns1>{5:s}
+                                                <IsEnabled>{6:s}</IsEnabled>
+                                                <IpRanges>
+                                                    <IpRange>
+                                                        <StartAddress>{7:s}</StartAddress>
+                                                        <EndAddress>{8:s}</EndAddress>
+                                                    </IpRange>
+                                                </IpRanges>
+                                            </IpScope>
+                                        </IpScopes>
+                                        <FenceMode>{9:s}</FenceMode>
+                                    </Configuration>
+                                    <IsShared>{10:s}</IsShared>
+                        </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
+                                                    subnet_address, dns1, dns2_text, dhcp_enabled,
+                                                    dhcp_start_address, dhcp_end_address,
+                                                    fence_mode, isshared)
+
+            headers['Content-Type'] = 'application/vnd.vmware.vcloud.orgVdcNetwork+xml'
+            try:
+                response = self.perform_request(req_type='POST',
+                                           url=add_vdc_rest_url,
+                                           headers=headers,
+                                           data=data)
+
+                if response.status_code != 201:
+                    self.logger.debug("Create Network POST REST API call failed. Return status code {}, Response content: {}"
+                                      .format(response.status_code,response.content))
+                else:
+                    network_task = self.get_task_from_response(response.content)
+                    self.logger.debug("Create Network REST : Waiting for Network creation complete")
+                    time.sleep(5)
+                    result = self.client.get_task_monitor().wait_for_success(task=network_task)
+                    if result.get('status') == 'success':
+                        return response.content
+                    else:
+                        self.logger.debug("create_network_rest task failed. Network Create response : {}"
+                                          .format(response.content))
+            except Exception as exp:
+                self.logger.debug("create_network_rest : Exception : {} ".format(exp))
+
+        return None
+
+    def convert_cidr_to_netmask(self, cidr_ip=None):
+        """
+        Method sets convert CIDR netmask address to normal IP format
+        Args:
+            cidr_ip : CIDR IP address
+            Returns:
+                netmask : Converted netmask
+        """
+        if cidr_ip is not None:
+            if '/' in cidr_ip:
+                network, net_bits = cidr_ip.split('/')
+                netmask = socket.inet_ntoa(struct.pack(">I", (0xffffffff << (32 - int(net_bits))) & 0xffffffff))
+            else:
+                netmask = cidr_ip
+            return netmask
+        return None
+
+    def get_provider_rest(self, vca=None):
+        """
+        Method gets provider vdc view from vcloud director
+
+        Args:
+            network_name - is network name to be created.
+            parent_network_uuid - is parent provider vdc network that will be used for mapping.
+            It optional attribute. by default if no parent network indicate the first available will be used.
+
+            Returns:
+                The return xml content of respond or None
+        """
+
+        url_list = [self.url, '/api/admin']
+        if vca:
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                       'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+            response = self.perform_request(req_type='GET',
+                                            url=''.join(url_list),
+                                            headers=headers)
+
+        if response.status_code == requests.codes.ok:
+            return response.content
+        return None
+
+    def create_vdc(self, vdc_name=None):
+
+        vdc_dict = {}
+
+        xml_content = self.create_vdc_from_tmpl_rest(vdc_name=vdc_name)
+        if xml_content is not None:
+            try:
+                task_resp_xmlroot = XmlElementTree.fromstring(xml_content)
+                for child in task_resp_xmlroot:
+                    if child.tag.split("}")[1] == 'Owner':
+                        vdc_id = child.attrib.get('href').split("/")[-1]
+                        vdc_dict[vdc_id] = task_resp_xmlroot.get('href')
+                        return vdc_dict
+            except:
+                self.logger.debug("Respond body {}".format(xml_content))
+
+        return None
+
+    def create_vdc_from_tmpl_rest(self, vdc_name=None):
+        """
+        Method create vdc in vCloud director based on VDC template.
+        it uses pre-defined template.
+
+        Args:
+            vdc_name -  name of a new vdc.
+
+            Returns:
+                The return xml content of respond or None
+        """
+        # pre-requesite atleast one vdc template should be available in vCD
+        self.logger.info("Creating new vdc {}".format(vdc_name))
+        vca = self.connect_as_admin()
+        if not vca:
+            raise vimconn.vimconnConnectionException("Failed to connect vCD")
+        if vdc_name is None:
+            return None
+
+        url_list = [self.url, '/api/vdcTemplates']
+        vm_list_rest_call = ''.join(url_list)
+
+        headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                    'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
+        response = self.perform_request(req_type='GET',
+                                        url=vm_list_rest_call,
+                                        headers=headers)
+
+        # container url to a template
+        vdc_template_ref = None
+        try:
+            vm_list_xmlroot = XmlElementTree.fromstring(response.content)
+            for child in vm_list_xmlroot:
+                # application/vnd.vmware.admin.providervdc+xml
+                # we need find a template from witch we instantiate VDC
+                if child.tag.split("}")[1] == 'VdcTemplate':
+                    if child.attrib.get('type') == 'application/vnd.vmware.admin.vdcTemplate+xml':
+                        vdc_template_ref = child.attrib.get('href')
+        except:
+            self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
+            self.logger.debug("Respond body {}".format(response.content))
+            return None
+
+        # if we didn't found required pre defined template we return None
+        if vdc_template_ref is None:
+            return None
+
+        try:
+            # instantiate vdc
+            url_list = [self.url, '/api/org/', self.org_uuid, '/action/instantiate']
+            vm_list_rest_call = ''.join(url_list)
+            data = """<InstantiateVdcTemplateParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
+                                        <Source href="{1:s}"></Source>
+                                        <Description>opnemano</Description>
+                                        </InstantiateVdcTemplateParams>""".format(vdc_name, vdc_template_ref)
+
+            headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml'
+
+            response = self.perform_request(req_type='POST',
+                                            url=vm_list_rest_call,
+                                            headers=headers,
+                                            data=data)
+
+            vdc_task = self.get_task_from_response(response.content)
+            self.client.get_task_monitor().wait_for_success(task=vdc_task)
+
+            # if we all ok we respond with content otherwise by default None
+            if response.status_code >= 200 and response.status_code < 300:
+                return response.content
+            return None
+        except:
+            self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
+            self.logger.debug("Respond body {}".format(response.content))
+
+        return None
+
+    def create_vdc_rest(self, vdc_name=None):
+        """
+        Method create network in vCloud director
+
+        Args:
+            vdc_name - vdc name to be created
+            Returns:
+                The return response
+        """
+
+        self.logger.info("Creating new vdc {}".format(vdc_name))
+
+        vca = self.connect_as_admin()
+        if not vca:
+            raise vimconn.vimconnConnectionException("Failed to connect vCD")
+        if vdc_name is None:
+            return None
+
+        url_list = [self.url, '/api/admin/org/', self.org_uuid]
+        vm_list_rest_call = ''.join(url_list)
+
+        if vca._session:
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                      'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+            response = self.perform_request(req_type='GET',
+                                            url=vm_list_rest_call,
+                                            headers=headers)
+
+            provider_vdc_ref = None
+            add_vdc_rest_url = None
+            available_networks = None
+
+            if response.status_code != requests.codes.ok:
+                self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
+                                                                                          response.status_code))
+                return None
+            else:
+                try:
+                    vm_list_xmlroot = XmlElementTree.fromstring(response.content)
+                    for child in vm_list_xmlroot:
+                        # application/vnd.vmware.admin.providervdc+xml
+                        if child.tag.split("}")[1] == 'Link':
+                            if child.attrib.get('type') == 'application/vnd.vmware.admin.createVdcParams+xml' \
+                                    and child.attrib.get('rel') == 'add':
+                                add_vdc_rest_url = child.attrib.get('href')
+                except:
+                    self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
+                    self.logger.debug("Respond body {}".format(response.content))
+                    return None
+
+                response = self.get_provider_rest(vca=vca)
+                try:
+                    vm_list_xmlroot = XmlElementTree.fromstring(response)
+                    for child in vm_list_xmlroot:
+                        if child.tag.split("}")[1] == 'ProviderVdcReferences':
+                            for sub_child in child:
+                                provider_vdc_ref = sub_child.attrib.get('href')
+                except:
+                    self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
+                    self.logger.debug("Respond body {}".format(response))
+                    return None
+
+                if add_vdc_rest_url is not None and provider_vdc_ref is not None:
+                    data = """ <CreateVdcParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5"><Description>{1:s}</Description>
+                            <AllocationModel>ReservationPool</AllocationModel>
+                            <ComputeCapacity><Cpu><Units>MHz</Units><Allocated>2048</Allocated><Limit>2048</Limit></Cpu>
+                            <Memory><Units>MB</Units><Allocated>2048</Allocated><Limit>2048</Limit></Memory>
+                            </ComputeCapacity><NicQuota>0</NicQuota><NetworkQuota>100</NetworkQuota>
+                            <VdcStorageProfile><Enabled>true</Enabled><Units>MB</Units><Limit>20480</Limit><Default>true</Default></VdcStorageProfile>
+                            <ProviderVdcReference
+                            name="Main Provider"
+                            href="{2:s}" />
+                    <UsesFastProvisioning>true</UsesFastProvisioning></CreateVdcParams>""".format(escape(vdc_name),
+                                                                                                  escape(vdc_name),
+                                                                                                  provider_vdc_ref)
+
+                    headers['Content-Type'] = 'application/vnd.vmware.admin.createVdcParams+xml'
+
+                    response = self.perform_request(req_type='POST',
+                                                    url=add_vdc_rest_url,
+                                                    headers=headers,
+                                                    data=data)
+
+                    # if we all ok we respond with content otherwise by default None
+                    if response.status_code == 201:
+                        return response.content
+        return None
+
+    def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
+        """
+        Method retrieve vapp detail from vCloud director
+
+        Args:
+            vapp_uuid - is vapp identifier.
+
+            Returns:
+                The return network uuid or return None
+        """
+
+        parsed_respond = {}
+        vca = None
+
+        if need_admin_access:
+            vca = self.connect_as_admin()
+        else:
+            vca = self.client
+
+        if not vca:
+            raise vimconn.vimconnConnectionException("Failed to connect vCD")
+        if vapp_uuid is None:
+            return None
+
+        url_list = [self.url, '/api/vApp/vapp-', vapp_uuid]
+        get_vapp_restcall = ''.join(url_list)
+
+        if vca._session:
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                       'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
+            response = self.perform_request(req_type='GET',
+                                            url=get_vapp_restcall,
+                                            headers=headers)
+
+            if response.status_code == 403:
+                if need_admin_access == False:
+                    response = self.retry_rest('GET', get_vapp_restcall)
+
+            if response.status_code != requests.codes.ok:
+                self.logger.debug("REST API call {} failed. Return status code {}".format(get_vapp_restcall,
+                                                                                          response.status_code))
+                return parsed_respond
+
+            try:
+                xmlroot_respond = XmlElementTree.fromstring(response.content)
+                parsed_respond['ovfDescriptorUploaded'] = xmlroot_respond.attrib['ovfDescriptorUploaded']
+
+                namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
+                              'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
+                              'vmw': 'http://www.vmware.com/schema/ovf',
+                              'vm': 'http://www.vmware.com/vcloud/v1.5',
+                              'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
+                              "vmext":"http://www.vmware.com/vcloud/extension/v1.5",
+                              "xmlns":"http://www.vmware.com/vcloud/v1.5"
+                             }
+
+                created_section = xmlroot_respond.find('vm:DateCreated', namespaces)
+                if created_section is not None:
+                    parsed_respond['created'] = created_section.text
+
+                network_section = xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig', namespaces)
+                if network_section is not None and 'networkName' in network_section.attrib:
+                    parsed_respond['networkname'] = network_section.attrib['networkName']
+
+                ipscopes_section = \
+                    xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig/vm:Configuration/vm:IpScopes',
+                                         namespaces)
+                if ipscopes_section is not None:
+                    for ipscope in ipscopes_section:
+                        for scope in ipscope:
+                            tag_key = scope.tag.split("}")[1]
+                            if tag_key == 'IpRanges':
+                                ip_ranges = scope.getchildren()
+                                for ipblock in ip_ranges:
+                                    for block in ipblock:
+                                        parsed_respond[block.tag.split("}")[1]] = block.text
+                            else:
+                                parsed_respond[tag_key] = scope.text
+
+                # parse children section for other attrib
+                children_section = xmlroot_respond.find('vm:Children/', namespaces)
+                if children_section is not None:
+                    parsed_respond['name'] = children_section.attrib['name']
+                    parsed_respond['nestedHypervisorEnabled'] = children_section.attrib['nestedHypervisorEnabled'] \
+                     if  "nestedHypervisorEnabled" in children_section.attrib else None
+                    parsed_respond['deployed'] = children_section.attrib['deployed']
+                    parsed_respond['status'] = children_section.attrib['status']
+                    parsed_respond['vmuuid'] = children_section.attrib['id'].split(":")[-1]
+                    network_adapter = children_section.find('vm:NetworkConnectionSection', namespaces)
+                    nic_list = []
+                    for adapters in network_adapter:
+                        adapter_key = adapters.tag.split("}")[1]
+                        if adapter_key == 'PrimaryNetworkConnectionIndex':
+                            parsed_respond['primarynetwork'] = adapters.text
+                        if adapter_key == 'NetworkConnection':
+                            vnic = {}
+                            if 'network' in adapters.attrib:
+                                vnic['network'] = adapters.attrib['network']
+                            for adapter in adapters:
+                                setting_key = adapter.tag.split("}")[1]
+                                vnic[setting_key] = adapter.text
+                            nic_list.append(vnic)
+
+                    for link in children_section:
+                        if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
+                            if link.attrib['rel'] == 'screen:acquireTicket':
+                                parsed_respond['acquireTicket'] = link.attrib
+                            if link.attrib['rel'] == 'screen:acquireMksTicket':
+                                parsed_respond['acquireMksTicket'] = link.attrib
+
+                    parsed_respond['interfaces'] = nic_list
+                    vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
+                    if vCloud_extension_section is not None:
+                        vm_vcenter_info = {}
+                        vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
+                        vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
+                        if vmext is not None:
+                            vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
+                        parsed_respond["vm_vcenter_info"]= vm_vcenter_info
+
+                    virtual_hardware_section = children_section.find('ovf:VirtualHardwareSection', namespaces)
+                    vm_virtual_hardware_info = {}
+                    if virtual_hardware_section is not None:
+                        for item in virtual_hardware_section.iterfind('ovf:Item',namespaces):
+                            if item.find("rasd:Description",namespaces).text == "Hard disk":
+                                disk_size = item.find("rasd:HostResource" ,namespaces
+                                                ).attrib["{"+namespaces['vm']+"}capacity"]
+
+                                vm_virtual_hardware_info["disk_size"]= disk_size
+                                break
+
+                        for link in virtual_hardware_section:
+                            if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
+                                if link.attrib['rel'] == 'edit' and link.attrib['href'].endswith("/disks"):
+                                    vm_virtual_hardware_info["disk_edit_href"] = link.attrib['href']
+                                    break
+
+                    parsed_respond["vm_virtual_hardware"]= vm_virtual_hardware_info
+            except Exception as exp :
+                self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
+        return parsed_respond
+
+    def acquire_console(self, vm_uuid=None):
+
+        if vm_uuid is None:
+            return None
+        if self.client._session:
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                       'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+            vm_dict = self.get_vapp_details_rest(vapp_uuid=vm_uuid)
+            console_dict = vm_dict['acquireTicket']
+            console_rest_call = console_dict['href']
+
+            response = self.perform_request(req_type='POST',
+                                            url=console_rest_call,
+                                            headers=headers)
+
+            if response.status_code == 403:
+                response = self.retry_rest('POST', console_rest_call)
+
+            if response.status_code == requests.codes.ok:
+                return response.content
+
+        return None
+
+    def modify_vm_disk(self, vapp_uuid, flavor_disk):
+        """
+        Method retrieve vm disk details
+
+        Args:
+            vapp_uuid - is vapp identifier.
+            flavor_disk - disk size as specified in VNFD (flavor)
+
+            Returns:
+                The return network uuid or return None
+        """
+        status = None
+        try:
+            #Flavor disk is in GB convert it into MB
+            flavor_disk = int(flavor_disk) * 1024
+            vm_details = self.get_vapp_details_rest(vapp_uuid)
+            if vm_details:
+                vm_name = vm_details["name"]
+                self.logger.info("VM: {} flavor_disk :{}".format(vm_name , flavor_disk))
+
+            if vm_details and "vm_virtual_hardware" in vm_details:
+                vm_disk = int(vm_details["vm_virtual_hardware"]["disk_size"])
+                disk_edit_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
+
+                self.logger.info("VM: {} VM_disk :{}".format(vm_name , vm_disk))
+
+                if flavor_disk > vm_disk:
+                    status = self.modify_vm_disk_rest(disk_edit_href ,flavor_disk)
+                    self.logger.info("Modify disk of VM {} from {} to {} MB".format(vm_name,
+                                                         vm_disk,  flavor_disk ))
+                else:
+                    status = True
+                    self.logger.info("No need to modify disk of VM {}".format(vm_name))
+
+            return status
+        except Exception as exp:
+            self.logger.info("Error occurred while modifing disk size {}".format(exp))
+
+
+    def modify_vm_disk_rest(self, disk_href , disk_size):
+        """
+        Method retrieve modify vm disk size
+
+        Args:
+            disk_href - vCD API URL to GET and PUT disk data
+            disk_size - disk size as specified in VNFD (flavor)
+
+            Returns:
+                The return network uuid or return None
+        """
+        if disk_href is None or disk_size is None:
+            return None
+
+        if self.client._session:
+                headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+                response = self.perform_request(req_type='GET',
+                                                url=disk_href,
+                                                headers=headers)
+
+        if response.status_code == 403:
+            response = self.retry_rest('GET', disk_href)
+
+        if response.status_code != requests.codes.ok:
+            self.logger.debug("GET REST API call {} failed. Return status code {}".format(disk_href,
+                                                                            response.status_code))
+            return None
+        try:
+            lxmlroot_respond = lxmlElementTree.fromstring(response.content)
+            namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
+            #For python3
+            #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
+            namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
+
+            for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
+                if item.find("rasd:Description",namespaces).text == "Hard disk":
+                    disk_item = item.find("rasd:HostResource" ,namespaces )
+                    if disk_item is not None:
+                        disk_item.attrib["{"+namespaces['xmlns']+"}capacity"] = str(disk_size)
+                        break
+
+            data = lxmlElementTree.tostring(lxmlroot_respond, encoding='utf8', method='xml',
+                                             xml_declaration=True)
+
+            #Send PUT request to modify disk size
+            headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
+
+            response = self.perform_request(req_type='PUT',
+                                                url=disk_href,
+                                                headers=headers,
+                                                data=data)
+            if response.status_code == 403:
+                add_headers = {'Content-Type': headers['Content-Type']}
+                response = self.retry_rest('PUT', disk_href, add_headers, data)
+
+            if response.status_code != 202:
+                self.logger.debug("PUT REST API call {} failed. Return status code {}".format(disk_href,
+                                                                            response.status_code))
+            else:
+                modify_disk_task = self.get_task_from_response(response.content)
+                result = self.client.get_task_monitor().wait_for_success(task=modify_disk_task)
+                if result.get('status') == 'success':
+                    return True
+                else:
+                    return False
+            return None
+
+        except Exception as exp :
+                self.logger.info("Error occurred calling rest api for modifing disk size {}".format(exp))
+                return None
+
+    def add_pci_devices(self, vapp_uuid , pci_devices , vmname_andid):
+        """
+            Method to attach pci devices to VM
+
+             Args:
+                vapp_uuid - uuid of vApp/VM
+                pci_devices - pci devices infromation as specified in VNFD (flavor)
+
+            Returns:
+                The status of add pci device task , vm object and
+                vcenter_conect object
+        """
+        vm_obj = None
+        self.logger.info("Add pci devices {} into vApp {}".format(pci_devices , vapp_uuid))
+        vcenter_conect, content = self.get_vcenter_content()
+        vm_moref_id = self.get_vm_moref_id(vapp_uuid)
+
+        if vm_moref_id:
+            try:
+                no_of_pci_devices = len(pci_devices)
+                if no_of_pci_devices > 0:
+                    #Get VM and its host
+                    host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
+                    self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
+                    if host_obj and vm_obj:
+                        #get PCI devies from host on which vapp is currently installed
+                        avilable_pci_devices = self.get_pci_devices(host_obj, no_of_pci_devices)
+
+                        if avilable_pci_devices is None:
+                            #find other hosts with active pci devices
+                            new_host_obj , avilable_pci_devices = self.get_host_and_PCIdevices(
+                                                                content,
+                                                                no_of_pci_devices
+                                                                )
+
+                            if new_host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
+                                #Migrate vm to the host where PCI devices are availble
+                                self.logger.info("Relocate VM {} on new host {}".format(vm_obj, new_host_obj))
+                                task = self.relocate_vm(new_host_obj, vm_obj)
+                                if task is not None:
+                                    result = self.wait_for_vcenter_task(task, vcenter_conect)
+                                    self.logger.info("Migrate VM status: {}".format(result))
+                                    host_obj = new_host_obj
+                                else:
+                                    self.logger.info("Fail to migrate VM : {}".format(result))
+                                    raise vimconn.vimconnNotFoundException(
+                                    "Fail to migrate VM : {} to host {}".format(
+                                                    vmname_andid,
+                                                    new_host_obj)
+                                        )
+
+                        if host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
+                            #Add PCI devices one by one
+                            for pci_device in avilable_pci_devices:
+                                task = self.add_pci_to_vm(host_obj, vm_obj, pci_device)
+                                if task:
+                                    status= self.wait_for_vcenter_task(task, vcenter_conect)
+                                    if status:
+                                        self.logger.info("Added PCI device {} to VM {}".format(pci_device,str(vm_obj)))
+                                else:
+                                    self.logger.error("Fail to add PCI device {} to VM {}".format(pci_device,str(vm_obj)))
+                            return True, vm_obj, vcenter_conect
+                        else:
+                            self.logger.error("Currently there is no host with"\
+                                              " {} number of avaialble PCI devices required for VM {}".format(
+                                                                            no_of_pci_devices,
+                                                                            vmname_andid)
+                                              )
+                            raise vimconn.vimconnNotFoundException(
+                                    "Currently there is no host with {} "\
+                                    "number of avaialble PCI devices required for VM {}".format(
+                                                                            no_of_pci_devices,
+                                                                            vmname_andid))
+                else:
+                    self.logger.debug("No infromation about PCI devices {} ",pci_devices)
+
+            except vmodl.MethodFault as error:
+                self.logger.error("Error occurred while adding PCI devices {} ",error)
+        return None, vm_obj, vcenter_conect
+
+    def get_vm_obj(self, content, mob_id):
+        """
+            Method to get the vsphere VM object associated with a given morf ID
+             Args:
+                vapp_uuid - uuid of vApp/VM
+                content - vCenter content object
+                mob_id - mob_id of VM
+
+            Returns:
+                    VM and host object
+        """
+        vm_obj = None
+        host_obj = None
+        try :
+            container = content.viewManager.CreateContainerView(content.rootFolder,
+                                                        [vim.VirtualMachine], True
+                                                        )
+            for vm in container.view:
+                mobID = vm._GetMoId()
+                if mobID == mob_id:
+                    vm_obj = vm
+                    host_obj = vm_obj.runtime.host
+                    break
+        except Exception as exp:
+            self.logger.error("Error occurred while finding VM object : {}".format(exp))
+        return host_obj, vm_obj
+
+    def get_pci_devices(self, host, need_devices):
+        """
+            Method to get the details of pci devices on given host
+             Args:
+                host - vSphere host object
+                need_devices - number of pci devices needed on host
+
+             Returns:
+                array of pci devices
+        """
+        all_devices = []
+        all_device_ids = []
+        used_devices_ids = []
+
+        try:
+            if host:
+                pciPassthruInfo = host.config.pciPassthruInfo
+                pciDevies = host.hardware.pciDevice
+
+            for pci_status in pciPassthruInfo:
+                if pci_status.passthruActive:
+                    for device in pciDevies:
+                        if device.id == pci_status.id:
+                            all_device_ids.append(device.id)
+                            all_devices.append(device)
+
+            #check if devices are in use
+            avalible_devices = all_devices
+            for vm in host.vm:
+                if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
+                    vm_devices = vm.config.hardware.device
+                    for device in vm_devices:
+                        if type(device) is vim.vm.device.VirtualPCIPassthrough:
+                            if device.backing.id in all_device_ids:
+                                for use_device in avalible_devices:
+                                    if use_device.id == device.backing.id:
+                                        avalible_devices.remove(use_device)
+                                used_devices_ids.append(device.backing.id)
+                                self.logger.debug("Device {} from devices {}"\
+                                        "is in use".format(device.backing.id,
+                                                           device)
+                                            )
+            if len(avalible_devices) < need_devices:
+                self.logger.debug("Host {} don't have {} number of active devices".format(host,
+                                                                            need_devices))
+                self.logger.debug("found only {} devives {}".format(len(avalible_devices),
+                                                                    avalible_devices))
+                return None
+            else:
+                required_devices = avalible_devices[:need_devices]
+                self.logger.info("Found {} PCI devivces on host {} but required only {}".format(
+                                                            len(avalible_devices),
+                                                            host,
+                                                            need_devices))
+                self.logger.info("Retruning {} devices as {}".format(need_devices,
+                                                                required_devices ))
+                return required_devices
+
+        except Exception as exp:
+            self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host))
+
+        return None
+
+    def get_host_and_PCIdevices(self, content, need_devices):
+        """
+         Method to get the details of pci devices infromation on all hosts
+
+            Args:
+                content - vSphere host object
+                need_devices - number of pci devices needed on host
+
+            Returns:
+                 array of pci devices and host object
+        """
+        host_obj = None
+        pci_device_objs = None
+        try:
+            if content:
+                container = content.viewManager.CreateContainerView(content.rootFolder,
+                                                            [vim.HostSystem], True)
+                for host in container.view:
+                    devices = self.get_pci_devices(host, need_devices)
+                    if devices:
+                        host_obj = host
+                        pci_device_objs = devices
+                        break
+        except Exception as exp:
+            self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host_obj))
+
+        return host_obj,pci_device_objs
+
+    def relocate_vm(self, dest_host, vm) :
+        """
+         Method to get the relocate VM to new host
+
+            Args:
+                dest_host - vSphere host object
+                vm - vSphere VM object
+
+            Returns:
+                task object
+        """
+        task = None
+        try:
+            relocate_spec = vim.vm.RelocateSpec(host=dest_host)
+            task = vm.Relocate(relocate_spec)
+            self.logger.info("Migrating {} to destination host {}".format(vm, dest_host))
+        except Exception as exp:
+            self.logger.error("Error occurred while relocate VM {} to new host {}: {}".format(
+                                                                            dest_host, vm, exp))
+        return task
+
+    def wait_for_vcenter_task(self, task, actionName='job', hideResult=False):
+        """
+        Waits and provides updates on a vSphere task
+        """
+        while task.info.state == vim.TaskInfo.State.running:
+            time.sleep(2)
+
+        if task.info.state == vim.TaskInfo.State.success:
+            if task.info.result is not None and not hideResult:
+                self.logger.info('{} completed successfully, result: {}'.format(
+                                                            actionName,
+                                                            task.info.result))
+            else:
+                self.logger.info('Task {} completed successfully.'.format(actionName))
+        else:
+            self.logger.error('{} did not complete successfully: {} '.format(
+                                                            actionName,
+                                                            task.info.error)
+                              )
+
+        return task.info.result
+
+    def add_pci_to_vm(self,host_object, vm_object, host_pci_dev):
+        """
+         Method to add pci device in given VM
+
+            Args:
+                host_object - vSphere host object
+                vm_object - vSphere VM object
+                host_pci_dev -  host_pci_dev must be one of the devices from the
+                                host_object.hardware.pciDevice list
+                                which is configured as a PCI passthrough device
+
+            Returns:
+                task object
+        """
+        task = None
+        if vm_object and host_object and host_pci_dev:
+            try :
+                #Add PCI device to VM
+                pci_passthroughs = vm_object.environmentBrowser.QueryConfigTarget(host=None).pciPassthrough
+                systemid_by_pciid = {item.pciDevice.id: item.systemId for item in pci_passthroughs}
+
+                if host_pci_dev.id not in systemid_by_pciid:
+                    self.logger.error("Device {} is not a passthrough device ".format(host_pci_dev))
+                    return None
+
+                deviceId = hex(host_pci_dev.deviceId % 2**16).lstrip('0x')
+                backing = vim.VirtualPCIPassthroughDeviceBackingInfo(deviceId=deviceId,
+                                            id=host_pci_dev.id,
+                                            systemId=systemid_by_pciid[host_pci_dev.id],
+                                            vendorId=host_pci_dev.vendorId,
+                                            deviceName=host_pci_dev.deviceName)
+
+                hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)
+
+                new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
+                new_device_config.operation = "add"
+                vmConfigSpec = vim.vm.ConfigSpec()
+                vmConfigSpec.deviceChange = [new_device_config]
+
+                task = vm_object.ReconfigVM_Task(spec=vmConfigSpec)
+                self.logger.info("Adding PCI device {} into VM {} from host {} ".format(
+                                                            host_pci_dev, vm_object, host_object)
+                                )
+            except Exception as exp:
+                self.logger.error("Error occurred while adding pci devive {} to VM {}: {}".format(
+                                                                            host_pci_dev,
+                                                                            vm_object,
+                                                                             exp))
+        return task
+
+    def get_vm_vcenter_info(self):
+        """
+        Method to get details of vCenter and vm
+
+            Args:
+                vapp_uuid - uuid of vApp or VM
+
+            Returns:
+                Moref Id of VM and deails of vCenter
+        """
+        vm_vcenter_info = {}
+
+        if self.vcenter_ip is not None:
+            vm_vcenter_info["vm_vcenter_ip"] = self.vcenter_ip
+        else:
+            raise vimconn.vimconnException(message="vCenter IP is not provided."\
+                                           " Please provide vCenter IP while attaching datacenter to tenant in --config")
+        if self.vcenter_port is not None:
+            vm_vcenter_info["vm_vcenter_port"] = self.vcenter_port
+        else:
+            raise vimconn.vimconnException(message="vCenter port is not provided."\
+                                           " Please provide vCenter port while attaching datacenter to tenant in --config")
+        if self.vcenter_user is not None:
+            vm_vcenter_info["vm_vcenter_user"] = self.vcenter_user
+        else:
+            raise vimconn.vimconnException(message="vCenter user is not provided."\
+                                           " Please provide vCenter user while attaching datacenter to tenant in --config")
+
+        if self.vcenter_password is not None:
+            vm_vcenter_info["vm_vcenter_password"] = self.vcenter_password
+        else:
+            raise vimconn.vimconnException(message="vCenter user password is not provided."\
+                                           " Please provide vCenter user password while attaching datacenter to tenant in --config")
+
+        return vm_vcenter_info
+
+
    def get_vm_pci_details(self, vmuuid):
        """
            Method to get VM PCI device details from vCenter

            Args:
                vmuuid - uuid of the vApp/VM whose PCI passthrough devices are queried

            Returns:
                dict of PCI devices attached to VM, keyed by device label, plus
                "host_name"/"host_ip" of the ESXi host running the VM.
                NOTE(review): returns None implicitly when no moref id is found
                for vmuuid - confirm callers handle that.

            Raises:
                vimconn.vimconnException on any error while querying vCenter
        """
        vm_pci_devices_info = {}
        try:
            vcenter_conect, content = self.get_vcenter_content()
            vm_moref_id = self.get_vm_moref_id(vmuuid)
            if vm_moref_id:
                #Get VM and its host
                if content:
                    host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
                    if host_obj and vm_obj:
                        vm_pci_devices_info["host_name"]= host_obj.name
                        # IP of the host's first vmkernel NIC
                        vm_pci_devices_info["host_ip"]= host_obj.config.network.vnic[0].spec.ip.ipAddress
                        for device in vm_obj.config.hardware.device:
                            if type(device) == vim.vm.device.VirtualPCIPassthrough:
                                # NOTE(review): 'devide_id' key is a typo kept as-is;
                                # existing consumers of this dict may rely on it
                                device_details={'devide_id':device.backing.id,
                                                'pciSlotNumber':device.slotInfo.pciSlotNumber,
                                            }
                                vm_pci_devices_info[device.deviceInfo.label] = device_details
                else:
                    self.logger.error("Can not connect to vCenter while getting "\
                                          "PCI devices infromationn")
                return vm_pci_devices_info
        except Exception as exp:
            self.logger.error("Error occurred while getting VM infromationn"\
                             " for VM : {}".format(exp))
            raise vimconn.vimconnException(message=exp)
+
    def reserve_memory_for_all_vms(self, vapp, memory_mb):
        """
            Method to reserve memory for all VMs of a vApp by rewriting each
            VM's virtualHardwareSection/memory document through the vCloud
            Director REST API (GET, patch rasd:Reservation, PUT back).
            Args :
                vapp - VApp whose member VMs are updated
                memory_mb - Memory reservation in MB applied to every VM
            Returns:
                None
            Raises:
                vimconn.vimconnException when the GET or PUT REST call fails
        """

        self.logger.info("Reserve memory for all VMs")
        for vms in vapp.get_all_vms():
            # vApp VM ids look like 'urn:vcloud:vm:<uuid>'; keep only the uuid
            vm_id = vms.get('id').split(':')[-1]

            url_rest_call = "{}/api/vApp/vm-{}/virtualHardwareSection/memory".format(self.url, vm_id)

            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
                       'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
            headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItem+xml'
            response = self.perform_request(req_type='GET',
                                            url=url_rest_call,
                                            headers=headers)

            # 403: authorization token may have expired - retry with a fresh session
            if response.status_code == 403:
                response = self.retry_rest('GET', url_rest_call)

            if response.status_code != 200:
                self.logger.error("REST call {} failed reason : {}"\
                                  "status code : {}".format(url_rest_call,
                                                            response.content,
                                                            response.status_code))
                raise vimconn.vimconnException("reserve_memory_for_all_vms : Failed to get "\
                                               "memory")

            # NOTE(review): bytearray(x, encoding=...) requires a text payload;
            # confirm perform_request returns str (not bytes) under python3
            bytexml = bytes(bytearray(response.content, encoding='utf-8'))
            contentelem = lxmlElementTree.XML(bytexml)
            namespaces = {prefix:uri for prefix,uri in contentelem.nsmap.items() if prefix}
            namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"

            # Find the reservation element in the response
            memelem_list = contentelem.findall(".//rasd:Reservation", namespaces)
            for memelem in memelem_list:
                memelem.text = str(memory_mb)

            newdata = lxmlElementTree.tostring(contentelem, pretty_print=True)

            response = self.perform_request(req_type='PUT',
                                            url=url_rest_call,
                                            headers=headers,
                                            data=newdata)

            if response.status_code == 403:
                add_headers = {'Content-Type': headers['Content-Type']}
                response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)

            # vCD answers 202 Accepted with an async task for a successful PUT
            if response.status_code != 202:
                self.logger.error("REST call {} failed reason : {}"\
                                  "status code : {} ".format(url_rest_call,
                                  response.content,
                                  response.status_code))
                raise vimconn.vimconnException("reserve_memory_for_all_vms : Failed to update "\
                                               "virtual hardware memory section")
            else:
                # Wait for the returned vCD task; log (but do not raise) on failure
                mem_task = self.get_task_from_response(response.content)
                result = self.client.get_task_monitor().wait_for_success(task=mem_task)
                if result.get('status') == 'success':
                    self.logger.info("reserve_memory_for_all_vms(): VM {} succeeded "\
                                      .format(vm_id))
                else:
                    self.logger.error("reserve_memory_for_all_vms(): VM {} failed "\
                                      .format(vm_id))
+
    def connect_vapp_to_org_vdc_network(self, vapp_id, net_name):
        """
            Configure VApp network config with org vdc network by appending a
            bridged NetworkConfig entry to the vApp's networkConfigSection.
            Args :
                vapp_id - id of the vApp whose networkConfigSection is updated
                net_name - name of the org vdc network to bridge the vApp to
            Returns:
                None
            Raises:
                vimconn.vimconnException when the network cannot be found or a
                REST call fails
        """

        self.logger.info("Connecting vapp {} to org vdc network {}".
                         format(vapp_id, net_name))

        url_rest_call = "{}/api/vApp/vapp-{}/networkConfigSection/".format(self.url, vapp_id)

        headers = {'Accept':'application/*+xml;version=' + API_VERSION,
                   'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
        response = self.perform_request(req_type='GET',
                                        url=url_rest_call,
                                        headers=headers)

        # 403: authorization token may have expired - retry with a fresh session
        if response.status_code == 403:
            response = self.retry_rest('GET', url_rest_call)

        if response.status_code != 200:
            self.logger.error("REST call {} failed reason : {}"\
                              "status code : {}".format(url_rest_call,
                                                        response.content,
                                                        response.status_code))
            raise vimconn.vimconnException("connect_vapp_to_org_vdc_network : Failed to get "\
                                           "network config section")

        data = response.content
        headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConfigSection+xml'
        net_id = self.get_network_id_by_name(net_name)
        if not net_id:
            raise vimconn.vimconnException("connect_vapp_to_org_vdc_network : Failed to find "\
                                           "existing network")

        # NOTE(review): bytearray(x, encoding=...) requires a text payload;
        # confirm perform_request returns str (not bytes) under python3
        bytexml = bytes(bytearray(data, encoding='utf-8'))
        newelem = lxmlElementTree.XML(bytexml)
        namespaces = {prefix: uri for prefix, uri in newelem.nsmap.items() if prefix}
        namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
        nwcfglist = newelem.findall(".//xmlns:NetworkConfig", namespaces)

        # VCD 9.7 returns an incorrect parentnetwork element. Fix it before PUT operation
        parentnetworklist = newelem.findall(".//xmlns:ParentNetwork", namespaces)
        if parentnetworklist:
            for pn in parentnetworklist:
                if "href" not in pn.keys():
                    # Rebuild the missing href from the network id
                    id_val = pn.get("id")
                    href_val = "{}/api/network/{}".format(self.url, id_val)
                    pn.set("href", href_val)

        # New NetworkConfig entry bridging the vApp to the org vdc network
        newstr = """<NetworkConfig networkName="{}">
                  <Configuration>
                       <ParentNetwork href="{}/api/network/{}"/>
                       <FenceMode>bridged</FenceMode>
                  </Configuration>
              </NetworkConfig>
           """.format(net_name, self.url, net_id)
        newcfgelem = lxmlElementTree.fromstring(newstr)
        # Insert right after the first existing NetworkConfig element
        if nwcfglist:
            nwcfglist[0].addnext(newcfgelem)

        newdata = lxmlElementTree.tostring(newelem, pretty_print=True)

        response = self.perform_request(req_type='PUT',
                                        url=url_rest_call,
                                        headers=headers,
                                        data=newdata)

        if response.status_code == 403:
            add_headers = {'Content-Type': headers['Content-Type']}
            response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)

        # vCD answers 202 Accepted with an async task for a successful PUT
        if response.status_code != 202:
            self.logger.error("REST call {} failed reason : {}"\
                              "status code : {} ".format(url_rest_call,
                              response.content,
                              response.status_code))
            raise vimconn.vimconnException("connect_vapp_to_org_vdc_network : Failed to update "\
                                           "network config section")
        else:
            # Wait for the returned vCD task; log (but do not raise) on failure
            vapp_task = self.get_task_from_response(response.content)
            result = self.client.get_task_monitor().wait_for_success(task=vapp_task)
            if result.get('status') == 'success':
                self.logger.info("connect_vapp_to_org_vdc_network(): Vapp {} connected to "\
                                 "network {}".format(vapp_id, net_name))
            else:
                self.logger.error("connect_vapp_to_org_vdc_network(): Vapp {} failed to "\
                                  "connect to network {}".format(vapp_id, net_name))
+
    def remove_primary_network_adapter_from_all_vms(self, vapp):
        """
            Method to remove the primary network adapter from every VM of a
            vApp by overwriting its networkConnectionSection with one that has
            no NetworkConnection entries.
            Args :
                vapp - VApp whose member VMs are updated
            Returns:
                None
            Raises:
                vimconn.vimconnException when the GET or PUT REST call fails
        """

        self.logger.info("Removing network adapter from all VMs")
        for vms in vapp.get_all_vms():
            # vApp VM ids look like 'urn:vcloud:vm:<uuid>'; keep only the uuid
            vm_id = vms.get('id').split(':')[-1]

            url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)

            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
                       'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
            response = self.perform_request(req_type='GET',
                                            url=url_rest_call,
                                            headers=headers)

            # 403: authorization token may have expired - retry with a fresh session
            if response.status_code == 403:
                response = self.retry_rest('GET', url_rest_call)

            if response.status_code != 200:
                self.logger.error("REST call {} failed reason : {}"\
                                  "status code : {}".format(url_rest_call,
                                                            response.content,
                                                            response.status_code))
                raise vimconn.vimconnException("remove_primary_network_adapter : Failed to get "\
                                               "network connection section")

            # NOTE(review): 'data' is computed here but never used afterwards -
            # the PUT body below is built from a fixed template. Also str.split
            # on response.content assumes a text payload; confirm
            # perform_request returns str (not bytes) under python3.
            data = response.content
            data = data.split('<Link rel="edit"')[0]

            headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'

            # Replacement section containing no NetworkConnection entries,
            # which detaches the primary adapter
            newdata = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
                      <NetworkConnectionSection xmlns="http://www.vmware.com/vcloud/v1.5"
                              xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
                              xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"
                              xmlns:common="http://schemas.dmtf.org/wbem/wscim/1/common"
                              xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
                              xmlns:vmw="http://www.vmware.com/schema/ovf"
                              xmlns:ovfenv="http://schemas.dmtf.org/ovf/environment/1"
                              xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
                              xmlns:ns9="http://www.vmware.com/vcloud/versions"
                              href="{url}" type="application/vnd.vmware.vcloud.networkConnectionSection+xml" ovf:required="false">
                              <ovf:Info>Specifies the available VM network connections</ovf:Info>
                             <PrimaryNetworkConnectionIndex>0</PrimaryNetworkConnectionIndex>
                             <Link rel="edit" href="{url}" type="application/vnd.vmware.vcloud.networkConnectionSection+xml"/>
                      </NetworkConnectionSection>""".format(url=url_rest_call)
            response = self.perform_request(req_type='PUT',
                                            url=url_rest_call,
                                            headers=headers,
                                            data=newdata)

            if response.status_code == 403:
                add_headers = {'Content-Type': headers['Content-Type']}
                response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)

            # vCD answers 202 Accepted with an async task for a successful PUT
            if response.status_code != 202:
                self.logger.error("REST call {} failed reason : {}"\
                                  "status code : {} ".format(url_rest_call,
                                  response.content,
                                  response.status_code))
                raise vimconn.vimconnException("remove_primary_network_adapter : Failed to update "\
                                               "network connection section")
            else:
                # Wait for the returned vCD task; log (but do not raise) on failure
                nic_task = self.get_task_from_response(response.content)
                result = self.client.get_task_monitor().wait_for_success(task=nic_task)
                if result.get('status') == 'success':
                    self.logger.info("remove_primary_network_adapter(): VM {} conneced to "\
                                      "default NIC type".format(vm_id))
                else:
                    self.logger.error("remove_primary_network_adapter(): VM {} failed to "\
                                      "connect NIC type".format(vm_id))
+
    def add_network_adapter_to_vms(self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None):
        """
            Method to add network adapter type to vm
            Args :
                vapp - vApp whose member VMs get the new network connection
                network_name - name of network
                primary_nic_index - int value for primary nic index
                nicIndex - int value for nic index
                net - dict describing the connection; optional keys
                      'floating_ip', 'ip_address' and 'mac_address' are honoured
                nic_type - specify model name to which add to vm
            Returns:
                None
            Raises:
                vimconn.vimconnException when a REST call fails or on any
                unexpected error
        """

        self.logger.info("Add network adapter to VM: network_name {} nicIndex {} nic_type {}".\
                         format(network_name, nicIndex, nic_type))
        try:
            ip_address = None
            floating_ip = False
            mac_address = None
            if 'floating_ip' in net: floating_ip = net['floating_ip']

            # Stub for ip_address feature
            if 'ip_address' in net: ip_address = net['ip_address']

            if 'mac_address' in net: mac_address = net['mac_address']

            # floating ip -> POOL, fixed ip -> MANUAL, otherwise DHCP
            if floating_ip:
                allocation_mode = "POOL"
            elif ip_address:
                allocation_mode = "MANUAL"
            else:
                allocation_mode = "DHCP"

            # Two near-identical branches: without nic_type the adapter uses
            # vCD's default NetworkAdapterType; with nic_type the type is set
            # explicitly in the XML. Both patch the networkConnectionSection
            # returned by GET via plain-string surgery and PUT it back.
            # NOTE(review): str operations on response.content assume a text
            # payload; confirm perform_request returns str under python3.
            if not nic_type:
                for vms in vapp.get_all_vms():
                    # vApp VM ids look like 'urn:vcloud:vm:<uuid>'
                    vm_id = vms.get('id').split(':')[-1]

                    url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)

                    headers = {'Accept':'application/*+xml;version=' + API_VERSION,
                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
                    response = self.perform_request(req_type='GET',
                                                    url=url_rest_call,
                                                    headers=headers)

                    # 403: token may have expired - retry with a fresh session
                    if response.status_code == 403:
                        response = self.retry_rest('GET', url_rest_call)

                    if response.status_code != 200:
                        self.logger.error("REST call {} failed reason : {}"\
                                             "status code : {}".format(url_rest_call,
                                                                    response.content,
                                                               response.status_code))
                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
                                                                         "network connection section")

                    # Keep only the document up to the first edit link
                    data = response.content
                    data = data.split('<Link rel="edit"')[0]
                    if '<PrimaryNetworkConnectionIndex>' not in data:
                        # No NIC yet: insert primary index plus the new connection
                        self.logger.debug("add_network_adapter PrimaryNIC not in data")
                        item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
                                <NetworkConnection network="{}">
                                <NetworkConnectionIndex>{}</NetworkConnectionIndex>
                                <IsConnected>true</IsConnected>
                                <IpAddressAllocationMode>{}</IpAddressAllocationMode>
                                </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
                                                                                         allocation_mode)
                        # Stub for ip_address feature
                        if ip_address:
                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
                            item =  item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))

                        if mac_address:
                            mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
                            item =  item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))

                        data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n</NetworkConnectionSection>'.format(item))
                    else:
                        # Primary NIC exists: append an additional connection
                        self.logger.debug("add_network_adapter PrimaryNIC in data")
                        new_item = """<NetworkConnection network="{}">
                                    <NetworkConnectionIndex>{}</NetworkConnectionIndex>
                                    <IsConnected>true</IsConnected>
                                    <IpAddressAllocationMode>{}</IpAddressAllocationMode>
                                    </NetworkConnection>""".format(network_name, nicIndex,
                                                                          allocation_mode)
                        # Stub for ip_address feature
                        if ip_address:
                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
                            new_item =  new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))

                        if mac_address:
                            mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
                            new_item =  new_item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))

                        data = data + new_item + '</NetworkConnectionSection>'

                    headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'

                    response = self.perform_request(req_type='PUT',
                                                    url=url_rest_call,
                                                    headers=headers,
                                                    data=data)

                    if response.status_code == 403:
                        add_headers = {'Content-Type': headers['Content-Type']}
                        response = self.retry_rest('PUT', url_rest_call, add_headers, data)

                    # vCD answers 202 Accepted with an async task for a successful PUT
                    if response.status_code != 202:
                        self.logger.error("REST call {} failed reason : {}"\
                                            "status code : {} ".format(url_rest_call,
                                                                    response.content,
                                                               response.status_code))
                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
                                                                            "network connection section")
                    else:
                        nic_task = self.get_task_from_response(response.content)
                        result = self.client.get_task_monitor().wait_for_success(task=nic_task)
                        if result.get('status') == 'success':
                            self.logger.info("add_network_adapter_to_vms(): VM {} conneced to "\
                                                               "default NIC type".format(vm_id))
                        else:
                            self.logger.error("add_network_adapter_to_vms(): VM {} failed to "\
                                                              "connect NIC type".format(vm_id))
            else:
                for vms in vapp.get_all_vms():
                    vm_id = vms.get('id').split(':')[-1]

                    url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)

                    headers = {'Accept':'application/*+xml;version=' + API_VERSION,
                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
                    response = self.perform_request(req_type='GET',
                                                    url=url_rest_call,
                                                    headers=headers)

                    if response.status_code == 403:
                        response = self.retry_rest('GET', url_rest_call)

                    if response.status_code != 200:
                        self.logger.error("REST call {} failed reason : {}"\
                                            "status code : {}".format(url_rest_call,
                                                                   response.content,
                                                              response.status_code))
                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
                                                                        "network connection section")
                    data = response.content
                    data = data.split('<Link rel="edit"')[0]
                    # SR-IOV and VF both map to vCD's SRIOVETHERNETCARD adapter type
                    vcd_netadapter_type = nic_type
                    if nic_type in ['SR-IOV', 'VF']:
                        vcd_netadapter_type = "SRIOVETHERNETCARD"

                    if '<PrimaryNetworkConnectionIndex>' not in data:
                        self.logger.debug("add_network_adapter PrimaryNIC not in data nic_type {}".format(nic_type))
                        item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
                                <NetworkConnection network="{}">
                                <NetworkConnectionIndex>{}</NetworkConnectionIndex>
                                <IsConnected>true</IsConnected>
                                <IpAddressAllocationMode>{}</IpAddressAllocationMode>
                                <NetworkAdapterType>{}</NetworkAdapterType>
                                </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
                                                                               allocation_mode, vcd_netadapter_type)
                        # Stub for ip_address feature
                        if ip_address:
                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
                            item =  item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))

                        if mac_address:
                            mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
                            item = item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))

                        data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n</NetworkConnectionSection>'.format(item))
                    else:
                        self.logger.debug("add_network_adapter PrimaryNIC in data nic_type {}".format(nic_type))
                        new_item = """<NetworkConnection network="{}">
                                    <NetworkConnectionIndex>{}</NetworkConnectionIndex>
                                    <IsConnected>true</IsConnected>
                                    <IpAddressAllocationMode>{}</IpAddressAllocationMode>
                                    <NetworkAdapterType>{}</NetworkAdapterType>
                                    </NetworkConnection>""".format(network_name, nicIndex,
                                                                allocation_mode, vcd_netadapter_type)
                        # Stub for ip_address feature
                        if ip_address:
                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
                            new_item =  new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))

                        if mac_address:
                            mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
                            new_item =  new_item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))

                        data = data + new_item + '</NetworkConnectionSection>'

                    headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'

                    response = self.perform_request(req_type='PUT',
                                                    url=url_rest_call,
                                                    headers=headers,
                                                    data=data)

                    if response.status_code == 403:
                        add_headers = {'Content-Type': headers['Content-Type']}
                        response = self.retry_rest('PUT', url_rest_call, add_headers, data)

                    if response.status_code != 202:
                        self.logger.error("REST call {} failed reason : {}"\
                                            "status code : {}".format(url_rest_call,
                                                                   response.content,
                                                              response.status_code))
                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
                                                                           "network connection section")
                    else:
                        nic_task = self.get_task_from_response(response.content)
                        result = self.client.get_task_monitor().wait_for_success(task=nic_task)
                        if result.get('status') == 'success':
                            self.logger.info("add_network_adapter_to_vms(): VM {} "\
                                               "conneced to NIC type {}".format(vm_id, nic_type))
                        else:
                            self.logger.error("add_network_adapter_to_vms(): VM {} "\
                                               "failed to connect NIC type {}".format(vm_id, nic_type))
        except Exception as exp:
            self.logger.error("add_network_adapter_to_vms() : exception occurred "\
                                               "while adding Network adapter")
            raise vimconn.vimconnException(message=exp)
+
+
+    def set_numa_affinity(self, vmuuid, paired_threads_id):
+        """
+            Method to assign numa affinity in vm configuration parammeters
+            Args :
+                vmuuid - vm uuid
+                paired_threads_id - one or more virtual processor
+                                    numbers
+            Returns:
+                return if True
+        """
+        try:
+            vcenter_conect, content = self.get_vcenter_content()
+            vm_moref_id = self.get_vm_moref_id(vmuuid)
+
+            host_obj, vm_obj = self.get_vm_obj(content ,vm_moref_id)
+            if vm_obj:
+                config_spec = vim.vm.ConfigSpec()
+                config_spec.extraConfig = []
+                opt = vim.option.OptionValue()
+                opt.key = 'numa.nodeAffinity'
+                opt.value = str(paired_threads_id)
+                config_spec.extraConfig.append(opt)
+                task = vm_obj.ReconfigVM_Task(config_spec)
+                if task:
+                    result = self.wait_for_vcenter_task(task, vcenter_conect)
+                    extra_config = vm_obj.config.extraConfig
+                    flag = False
+                    for opts in extra_config:
+                        if 'numa.nodeAffinity' in opts.key:
+                            flag = True
+                            self.logger.info("set_numa_affinity: Sucessfully assign numa affinity "\
+                                                     "value {} for vm {}".format(opt.value, vm_obj))
+                        if flag:
+                            return
+            else:
+                self.logger.error("set_numa_affinity: Failed to assign numa affinity")
+        except Exception as exp:
+            self.logger.error("set_numa_affinity : exception occurred while setting numa affinity "\
+                                                       "for VM {} : {}".format(vm_obj, vm_moref_id))
+            raise vimconn.vimconnException("set_numa_affinity : Error {} failed to assign numa "\
+                                                                           "affinity".format(exp))
+
+
+    def cloud_init(self, vapp, cloud_config):
+        """
+        Method to inject ssh-key
+        vapp - vapp object
+        cloud_config a dictionary with:
+                'key-pairs': (optional) list of strings with the public key to be inserted to the default user
+                'users': (optional) list of users to be inserted, each item is a dict with:
+                    'name': (mandatory) user name,
+                    'key-pairs': (optional) list of strings with the public key to be inserted to the user
+                'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
+                    or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
+                'config-files': (optional). List of files to be transferred. Each item is a dict with:
+                    'dest': (mandatory) string with the destination absolute path
+                    'encoding': (optional, by default text). Can be one of:
+                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
+                    'content' (mandatory): string with the content of the file
+                    'permissions': (optional) string with file permissions, typically octal notation '0644'
+                    'owner': (optional) file owner, string with the format 'owner:group'
+                'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk
+        """
+        try:
+            if not isinstance(cloud_config, dict):
+                raise Exception("cloud_init : parameter cloud_config is not a dictionary")
+            else:
+                key_pairs = []
+                userdata = []
+                if "key-pairs" in cloud_config:
+                    key_pairs = cloud_config["key-pairs"]
+
+                if "users" in cloud_config:
+                    userdata = cloud_config["users"]
+
+                self.logger.debug("cloud_init : Guest os customization started..")
+                customize_script = self.format_script(key_pairs=key_pairs, users_list=userdata)
+                customize_script = customize_script.replace("&","&amp;")
+                self.guest_customization(vapp, customize_script)
+
+        except Exception as exp:
+            self.logger.error("cloud_init : exception occurred while injecting "\
+                                                                       "ssh-key")
+            raise vimconn.vimconnException("cloud_init : Error {} failed to inject "\
+                                                               "ssh-key".format(exp))
+
+    def format_script(self, key_pairs=[], users_list=[]):
+        bash_script = """#!/bin/sh
+        echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
+        if [ "$1" = "precustomization" ];then
+            echo performing precustomization tasks   on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
+        """
+
+        keys = "\n".join(key_pairs)
+        if keys:
+            keys_data = """
+            if [ ! -d /root/.ssh ];then
+                mkdir /root/.ssh
+                chown root:root /root/.ssh
+                chmod 700 /root/.ssh
+                touch /root/.ssh/authorized_keys
+                chown root:root /root/.ssh/authorized_keys
+                chmod 600 /root/.ssh/authorized_keys
+                # make centos with selinux happy
+                which restorecon && restorecon -Rv /root/.ssh
+            else
+                touch /root/.ssh/authorized_keys
+                chown root:root /root/.ssh/authorized_keys
+                chmod 600 /root/.ssh/authorized_keys
+            fi
+            echo '{key}' >> /root/.ssh/authorized_keys
+            """.format(key=keys)
+
+            bash_script+= keys_data
+
+        for user in users_list:
+            if 'name' in user: user_name = user['name']
+            if 'key-pairs' in user:
+                user_keys = "\n".join(user['key-pairs'])
+            else:
+                user_keys = None
+
+            add_user_name = """
+                useradd -d /home/{user_name} -m -g users -s /bin/bash {user_name}
+                """.format(user_name=user_name)
+
+            bash_script+= add_user_name
+
+            if user_keys:
+                user_keys_data = """
+                mkdir /home/{user_name}/.ssh
+                chown {user_name}:{user_name} /home/{user_name}/.ssh
+                chmod 700 /home/{user_name}/.ssh
+                touch /home/{user_name}/.ssh/authorized_keys
+                chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
+                chmod 600 /home/{user_name}/.ssh/authorized_keys
+                # make centos with selinux happy
+                which restorecon && restorecon -Rv /home/{user_name}/.ssh
+                echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
+                """.format(user_name=user_name,user_key=user_keys)
+
+                bash_script+= user_keys_data
+
+        return bash_script+"\n\tfi"
+
+    def guest_customization(self, vapp, customize_script):
+        """
+        Method to customize guest os
+        vapp - Vapp object
+        customize_script - Customize script to be run at first boot of VM.
+        """
+        for vm in vapp.get_all_vms():
+            vm_id = vm.get('id').split(':')[-1]
+            vm_name = vm.get('name')
+            vm_name = vm_name.replace('_','-')
+
+            vm_customization_url = "{}/api/vApp/vm-{}/guestCustomizationSection/".format(self.url, vm_id)
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+
+            headers['Content-Type'] = "application/vnd.vmware.vcloud.guestCustomizationSection+xml"
+
+            data = """<GuestCustomizationSection
+                           xmlns="http://www.vmware.com/vcloud/v1.5"
+                           xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
+                           ovf:required="false" href="{}" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml">
+                           <ovf:Info>Specifies Guest OS Customization Settings</ovf:Info>
+                           <Enabled>true</Enabled>
+                           <ChangeSid>false</ChangeSid>
+                           <VirtualMachineId>{}</VirtualMachineId>
+                           <JoinDomainEnabled>false</JoinDomainEnabled>
+                           <UseOrgSettings>false</UseOrgSettings>
+                           <AdminPasswordEnabled>false</AdminPasswordEnabled>
+                           <AdminPasswordAuto>true</AdminPasswordAuto>
+                           <AdminAutoLogonEnabled>false</AdminAutoLogonEnabled>
+                           <AdminAutoLogonCount>0</AdminAutoLogonCount>
+                           <ResetPasswordRequired>false</ResetPasswordRequired>
+                           <CustomizationScript>{}</CustomizationScript>
+                           <ComputerName>{}</ComputerName>
+                           <Link href="{}" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml" rel="edit"/>
+                       </GuestCustomizationSection>
+                   """.format(vm_customization_url,
+                                             vm_id,
+                                  customize_script,
+                                           vm_name,
+                              vm_customization_url)
+
+            response = self.perform_request(req_type='PUT',
+                                             url=vm_customization_url,
+                                             headers=headers,
+                                             data=data)
+            if response.status_code == 202:
+                guest_task = self.get_task_from_response(response.content)
+                self.client.get_task_monitor().wait_for_success(task=guest_task)
+                self.logger.info("guest_customization : customized guest os task "\
+                                             "completed for VM {}".format(vm_name))
+            else:
+                self.logger.error("guest_customization : task for customized guest os"\
+                                                    "failed for VM {}".format(vm_name))
+                raise vimconn.vimconnException("guest_customization : failed to perform"\
+                                       "guest os customization on VM {}".format(vm_name))
+
+    def add_new_disk(self, vapp_uuid, disk_size):
+        """
+            Method to create an empty vm disk
+
+            Args:
+                vapp_uuid - is vapp identifier.
+                disk_size - size of disk to be created in GB
+
+            Returns:
+                None
+        """
+        status = False
+        vm_details = None
+        try:
+            #Disk size in GB, convert it into MB
+            if disk_size is not None:
+                disk_size_mb = int(disk_size) * 1024
+                vm_details = self.get_vapp_details_rest(vapp_uuid)
+
+            if vm_details and "vm_virtual_hardware" in vm_details:
+                self.logger.info("Adding disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
+                disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
+                status = self.add_new_disk_rest(disk_href, disk_size_mb)
+
+        except Exception as exp:
+            msg = "Error occurred while creating new disk {}.".format(exp)
+            self.rollback_newvm(vapp_uuid, msg)
+
+        if status:
+            self.logger.info("Added new disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
+        else:
+            #If failed to add disk, delete VM
+            msg = "add_new_disk: Failed to add new disk to {}".format(vm_details["name"])
+            self.rollback_newvm(vapp_uuid, msg)
+
+
+    def add_new_disk_rest(self, disk_href, disk_size_mb):
+        """
+        Retrives vApp Disks section & add new empty disk
+
+        Args:
+            disk_href: Disk section href to addd disk
+            disk_size_mb: Disk size in MB
+
+            Returns: Status of add new disk task
+        """
+        status = False
+        if self.client._session:
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+            response = self.perform_request(req_type='GET',
+                                            url=disk_href,
+                                            headers=headers)
+
+        if response.status_code == 403:
+            response = self.retry_rest('GET', disk_href)
+
+        if response.status_code != requests.codes.ok:
+            self.logger.error("add_new_disk_rest: GET REST API call {} failed. Return status code {}"
+                              .format(disk_href, response.status_code))
+            return status
+        try:
+            #Find but type & max of instance IDs assigned to disks
+            lxmlroot_respond = lxmlElementTree.fromstring(response.content)
+            namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
+            #For python3
+            #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
+            namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
+            instance_id = 0
+            for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
+                if item.find("rasd:Description",namespaces).text == "Hard disk":
+                    inst_id = int(item.find("rasd:InstanceID" ,namespaces).text)
+                    if inst_id > instance_id:
+                        instance_id = inst_id
+                        disk_item = item.find("rasd:HostResource" ,namespaces)
+                        bus_subtype = disk_item.attrib["{"+namespaces['xmlns']+"}busSubType"]
+                        bus_type = disk_item.attrib["{"+namespaces['xmlns']+"}busType"]
+
+            instance_id = instance_id + 1
+            new_item =   """<Item>
+                                <rasd:Description>Hard disk</rasd:Description>
+                                <rasd:ElementName>New disk</rasd:ElementName>
+                                <rasd:HostResource
+                                    xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
+                                    vcloud:capacity="{}"
+                                    vcloud:busSubType="{}"
+                                    vcloud:busType="{}"></rasd:HostResource>
+                                <rasd:InstanceID>{}</rasd:InstanceID>
+                                <rasd:ResourceType>17</rasd:ResourceType>
+                            </Item>""".format(disk_size_mb, bus_subtype, bus_type, instance_id)
+
+            new_data = response.content
+            #Add new item at the bottom
+            new_data = new_data.replace('</Item>\n</RasdItemsList>', '</Item>\n{}\n</RasdItemsList>'.format(new_item))
+
+            # Send PUT request to modify virtual hardware section with new disk
+            headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
+
+            response = self.perform_request(req_type='PUT',
+                                            url=disk_href,
+                                            data=new_data,
+                                            headers=headers)
+
+            if response.status_code == 403:
+                add_headers = {'Content-Type': headers['Content-Type']}
+                response = self.retry_rest('PUT', disk_href, add_headers, new_data)
+
+            if response.status_code != 202:
+                self.logger.error("PUT REST API call {} failed. Return status code {}. Response Content:{}"
+                                  .format(disk_href, response.status_code, response.content))
+            else:
+                add_disk_task = self.get_task_from_response(response.content)
+                result = self.client.get_task_monitor().wait_for_success(task=add_disk_task)
+                if result.get('status') == 'success':
+                    status = True
+                else:
+                    self.logger.error("Add new disk REST task failed to add {} MB disk".format(disk_size_mb))
+
+        except Exception as exp:
+            self.logger.error("Error occurred calling rest api for creating new disk {}".format(exp))
+
+        return status
+
+
+    def add_existing_disk(self, catalogs=None, image_id=None, size=None, template_name=None, vapp_uuid=None):
+        """
+            Method to add existing disk to vm
+            Args :
+                catalogs - List of VDC catalogs
+                image_id - Catalog ID
+                template_name - Name of template in catalog
+                vapp_uuid - UUID of vApp
+            Returns:
+                None
+        """
+        disk_info = None
+        vcenter_conect, content = self.get_vcenter_content()
+        #find moref-id of vm in image
+        catalog_vm_info = self.get_vapp_template_details(catalogs=catalogs,
+                                                         image_id=image_id,
+                                                        )
+
+        if catalog_vm_info and "vm_vcenter_info" in catalog_vm_info:
+            if "vm_moref_id" in catalog_vm_info["vm_vcenter_info"]:
+                catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get("vm_moref_id", None)
+                if catalog_vm_moref_id:
+                    self.logger.info("Moref_id of VM in catalog : {}" .format(catalog_vm_moref_id))
+                    host, catalog_vm_obj = self.get_vm_obj(content, catalog_vm_moref_id)
+                    if catalog_vm_obj:
+                        #find existing disk
+                        disk_info = self.find_disk(catalog_vm_obj)
+                    else:
+                        exp_msg = "No VM with image id {} found".format(image_id)
+                        self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
+        else:
+            exp_msg = "No Image found with image ID {} ".format(image_id)
+            self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
+
+        if disk_info:
+            self.logger.info("Existing disk_info : {}".format(disk_info))
+            #get VM
+            vm_moref_id = self.get_vm_moref_id(vapp_uuid)
+            host, vm_obj = self.get_vm_obj(content, vm_moref_id)
+            if vm_obj:
+                status = self.add_disk(vcenter_conect=vcenter_conect,
+                                       vm=vm_obj,
+                                       disk_info=disk_info,
+                                       size=size,
+                                       vapp_uuid=vapp_uuid
+                                       )
+            if status:
+                self.logger.info("Disk from image id {} added to {}".format(image_id,
+                                                                            vm_obj.config.name)
+                                 )
+        else:
+            msg = "No disk found with image id {} to add in VM {}".format(
+                                                            image_id,
+                                                            vm_obj.config.name)
+            self.rollback_newvm(vapp_uuid, msg, exp_type="NotFound")
+
+
+    def find_disk(self, vm_obj):
+        """
+         Method to find details of existing disk in VM
+            Args :
+                vm_obj - vCenter object of VM
+                image_id - Catalog ID
+            Returns:
+                disk_info : dict of disk details
+        """
+        disk_info = {}
+        if vm_obj:
+            try:
+                devices = vm_obj.config.hardware.device
+                for device in devices:
+                    if type(device) is vim.vm.device.VirtualDisk:
+                        if isinstance(device.backing,vim.vm.device.VirtualDisk.FlatVer2BackingInfo) and hasattr(device.backing, 'fileName'):
+                            disk_info["full_path"] = device.backing.fileName
+                            disk_info["datastore"] = device.backing.datastore
+                            disk_info["capacityKB"] = device.capacityInKB
+                            break
+            except Exception as exp:
+                self.logger.error("find_disk() : exception occurred while "\
+                                  "getting existing disk details :{}".format(exp))
+        return disk_info
+
+
+    def add_disk(self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info={}):
+        """
+         Method to add existing disk in VM
+            Args :
+                vcenter_conect - vCenter content object
+                vm - vCenter vm object
+                disk_info : dict of disk details
+            Returns:
+                status : status of add disk task
+        """
+        datastore = disk_info["datastore"] if "datastore" in disk_info else None
+        fullpath = disk_info["full_path"] if "full_path" in disk_info else None
+        capacityKB = disk_info["capacityKB"] if "capacityKB" in disk_info else None
+        if size is not None:
+            #Convert size from GB to KB
+            sizeKB = int(size) * 1024 * 1024
+            #compare size of existing disk and user given size.Assign whicherver is greater
+            self.logger.info("Add Existing disk : sizeKB {} , capacityKB {}".format(
+                                                                    sizeKB, capacityKB))
+            if sizeKB > capacityKB:
+                capacityKB = sizeKB
+
+        if datastore and fullpath and capacityKB:
+            try:
+                spec = vim.vm.ConfigSpec()
+                # get all disks on a VM, set unit_number to the next available
+                unit_number = 0
+                for dev in vm.config.hardware.device:
+                    if hasattr(dev.backing, 'fileName'):
+                        unit_number = int(dev.unitNumber) + 1
+                        # unit_number 7 reserved for scsi controller
+                        if unit_number == 7:
+                            unit_number += 1
+                    if isinstance(dev, vim.vm.device.VirtualDisk):
+                        #vim.vm.device.VirtualSCSIController
+                        controller_key = dev.controllerKey
+
+                self.logger.info("Add Existing disk : unit number {} , controller key {}".format(
+                                                                    unit_number, controller_key))
+                # add disk here
+                dev_changes = []
+                disk_spec = vim.vm.device.VirtualDeviceSpec()
+                disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
+                disk_spec.device = vim.vm.device.VirtualDisk()
+                disk_spec.device.backing = \
+                    vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
+                disk_spec.device.backing.thinProvisioned = True
+                disk_spec.device.backing.diskMode = 'persistent'
+                disk_spec.device.backing.datastore  = datastore
+                disk_spec.device.backing.fileName  = fullpath
+
+                disk_spec.device.unitNumber = unit_number
+                disk_spec.device.capacityInKB = capacityKB
+                disk_spec.device.controllerKey = controller_key
+                dev_changes.append(disk_spec)
+                spec.deviceChange = dev_changes
+                task = vm.ReconfigVM_Task(spec=spec)
+                status = self.wait_for_vcenter_task(task, vcenter_conect)
+                return status
+            except Exception as exp:
+                exp_msg = "add_disk() : exception {} occurred while adding disk "\
+                          "{} to vm {}".format(exp,
+                                               fullpath,
+                                               vm.config.name)
+                self.rollback_newvm(vapp_uuid, exp_msg)
+        else:
+            msg = "add_disk() : Can not add disk to VM with disk info {} ".format(disk_info)
+            self.rollback_newvm(vapp_uuid, msg)
+
+
+    def get_vcenter_content(self):
+        """
+         Get the vsphere content object
+        """
+        try:
+            vm_vcenter_info = self.get_vm_vcenter_info()
+        except Exception as exp:
+            self.logger.error("Error occurred while getting vCenter infromationn"\
+                             " for VM : {}".format(exp))
+            raise vimconn.vimconnException(message=exp)
+
+        context = None
+        if hasattr(ssl, '_create_unverified_context'):
+            context = ssl._create_unverified_context()
+
+        vcenter_conect = SmartConnect(
+                    host=vm_vcenter_info["vm_vcenter_ip"],
+                    user=vm_vcenter_info["vm_vcenter_user"],
+                    pwd=vm_vcenter_info["vm_vcenter_password"],
+                    port=int(vm_vcenter_info["vm_vcenter_port"]),
+                    sslContext=context
+                )
+        atexit.register(Disconnect, vcenter_conect)
+        content = vcenter_conect.RetrieveContent()
+        return vcenter_conect, content
+
+
+    def get_vm_moref_id(self, vapp_uuid):
+        """
+        Get the moref_id of given VM
+        """
+        try:
+            if vapp_uuid:
+                vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
+                if vm_details and "vm_vcenter_info" in vm_details:
+                    vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
+            return vm_moref_id
+
+        except Exception as exp:
+            self.logger.error("Error occurred while getting VM moref ID "\
+                             " for VM : {}".format(exp))
+            return None
+
+
+    def get_vapp_template_details(self, catalogs=None, image_id=None , template_name=None):
+        """
+            Method to get vApp template details
+                Args :
+                    catalogs - list of VDC catalogs
+                    image_id - Catalog ID to find
+                    template_name : template name in catalog
+                Returns:
+                    parsed_respond : dict of vApp tempalte details
+        """
+        parsed_response = {}
+
+        vca = self.connect_as_admin()
+        if not vca:
+            raise vimconn.vimconnConnectionException("Failed to connect vCD")
+
+        try:
+            org, vdc = self.get_vdc_details()
+            catalog = self.get_catalog_obj(image_id, catalogs)
+            if catalog:
+                items = org.get_catalog_item(catalog.get('name'), catalog.get('name'))
+                catalog_items = [items.attrib]
+
+                if len(catalog_items) == 1:
+                    headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
+
+                    response = self.perform_request(req_type='GET',
+                                                    url=catalog_items[0].get('href'),
+                                                    headers=headers)
+                    catalogItem = XmlElementTree.fromstring(response.content)
+                    entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
+                    vapp_tempalte_href = entity.get("href")
+                    #get vapp details and parse moref id
+
+                    namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
+                                  'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
+                                  'vmw': 'http://www.vmware.com/schema/ovf',
+                                  'vm': 'http://www.vmware.com/vcloud/v1.5',
+                                  'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
+                                  'vmext':"http://www.vmware.com/vcloud/extension/v1.5",
+                                  'xmlns':"http://www.vmware.com/vcloud/v1.5"
+                                }
+
+                    if vca._session:
+                        response = self.perform_request(req_type='GET',
+                                                    url=vapp_tempalte_href,
+                                                    headers=headers)
+
+                        if response.status_code != requests.codes.ok:
+                            self.logger.debug("REST API call {} failed. Return status code {}".format(
+                                                vapp_tempalte_href, response.status_code))
+
+                        else:
+                            xmlroot_respond = XmlElementTree.fromstring(response.content)
+                            children_section = xmlroot_respond.find('vm:Children/', namespaces)
+                            if children_section is not None:
+                                vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
+                            if vCloud_extension_section is not None:
+                                vm_vcenter_info = {}
+                                vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
+                                vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
+                                if vmext is not None:
+                                    vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
+                                parsed_response["vm_vcenter_info"]= vm_vcenter_info
+
+        except Exception as exp :
+            self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
+
+        return parsed_response
+
+
+    def rollback_newvm(self, vapp_uuid, msg , exp_type="Genric"):
+        """
+            Method to delete vApp
+                Args :
+                    vapp_uuid - vApp UUID
+                    msg - Error message to be logged
+                    exp_type : Exception type
+                Returns:
+                    None
+        """
+        if vapp_uuid:
+            status = self.delete_vminstance(vapp_uuid)
+        else:
+            msg = "No vApp ID"
+        self.logger.error(msg)
+        if exp_type == "Genric":
+            raise vimconn.vimconnException(msg)
+        elif exp_type == "NotFound":
+            raise vimconn.vimconnNotFoundException(message=msg)
+
+    def add_sriov(self, vapp_uuid, sriov_nets, vmname_andid):
+        """
+            Method to attach SRIOV adapters to VM
+
+             Args:
+                vapp_uuid - uuid of vApp/VM
+                sriov_nets - SRIOV devices infromation as specified in VNFD (flavor)
+                vmname_andid - vmname
+
+            Returns:
+                The status of add SRIOV adapter task , vm object and
+                vcenter_conect object
+        """
+        vm_obj = None
+        vcenter_conect, content = self.get_vcenter_content()
+        vm_moref_id = self.get_vm_moref_id(vapp_uuid)
+
+        if vm_moref_id:
+            try:
+                no_of_sriov_devices = len(sriov_nets)
+                if no_of_sriov_devices > 0:
+                    #Get VM and its host
+                    host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
+                    self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
+                    if host_obj and vm_obj:
+                        #get SRIOV devies from host on which vapp is currently installed
+                        avilable_sriov_devices = self.get_sriov_devices(host_obj,
+                                                                no_of_sriov_devices,
+                                                                )
+
+                        if len(avilable_sriov_devices) == 0:
+                            #find other hosts with active pci devices
+                            new_host_obj , avilable_sriov_devices = self.get_host_and_sriov_devices(
+                                                                content,
+                                                                no_of_sriov_devices,
+                                                                )
+
+                            if new_host_obj is not None and len(avilable_sriov_devices)> 0:
+                                #Migrate vm to the host where SRIOV devices are available
+                                self.logger.info("Relocate VM {} on new host {}".format(vm_obj,
+                                                                                    new_host_obj))
+                                task = self.relocate_vm(new_host_obj, vm_obj)
+                                if task is not None:
+                                    result = self.wait_for_vcenter_task(task, vcenter_conect)
+                                    self.logger.info("Migrate VM status: {}".format(result))
+                                    host_obj = new_host_obj
+                                else:
+                                    self.logger.info("Fail to migrate VM : {}".format(result))
+                                    raise vimconn.vimconnNotFoundException(
+                                    "Fail to migrate VM : {} to host {}".format(
+                                                    vmname_andid,
+                                                    new_host_obj)
+                                        )
+
+                        if host_obj is not None and avilable_sriov_devices is not None and len(avilable_sriov_devices)> 0:
+                            #Add SRIOV devices one by one
+                            for sriov_net in sriov_nets:
+                                network_name = sriov_net.get('net_id')
+                                dvs_portgr_name = self.create_dvPort_group(network_name)
+                                if sriov_net.get('type') == "VF" or sriov_net.get('type') == "SR-IOV":
+                                    #add vlan ID ,Modify portgroup for vlan ID
+                                    self.configure_vlanID(content, vcenter_conect, network_name)
+
+                                task = self.add_sriov_to_vm(content,
+                                                            vm_obj,
+                                                            host_obj,
+                                                            network_name,
+                                                            avilable_sriov_devices[0]
+                                                            )
+                                if task:
+                                    status= self.wait_for_vcenter_task(task, vcenter_conect)
+                                    if status:
+                                        self.logger.info("Added SRIOV {} to VM {}".format(
+                                                                        no_of_sriov_devices,
+                                                                        str(vm_obj)))
+                                else:
+                                    self.logger.error("Fail to add SRIOV {} to VM {}".format(
+                                                                        no_of_sriov_devices,
+                                                                        str(vm_obj)))
+                                    raise vimconn.vimconnUnexpectedResponse(
+                                    "Fail to add SRIOV adapter in VM ".format(str(vm_obj))
+                                        )
+                            return True, vm_obj, vcenter_conect
+                        else:
+                            self.logger.error("Currently there is no host with"\
+                                              " {} number of avaialble SRIOV "\
+                                              "VFs required for VM {}".format(
+                                                                no_of_sriov_devices,
+                                                                vmname_andid)
+                                              )
+                            raise vimconn.vimconnNotFoundException(
+                                    "Currently there is no host with {} "\
+                                    "number of avaialble SRIOV devices required for VM {}".format(
+                                                                            no_of_sriov_devices,
+                                                                            vmname_andid))
+                else:
+                    self.logger.debug("No infromation about SRIOV devices {} ",sriov_nets)
+
+            except vmodl.MethodFault as error:
+                self.logger.error("Error occurred while adding SRIOV {} ",error)
+        return None, vm_obj, vcenter_conect
+
+
+    def get_sriov_devices(self,host, no_of_vfs):
+        """
+            Method to get the details of SRIOV devices on given host
+             Args:
+                host - vSphere host object
+                no_of_vfs - number of VFs needed on host
+
+             Returns:
+                array of SRIOV devices
+        """
+        sriovInfo=[]
+        if host:
+            for device in host.config.pciPassthruInfo:
+                if isinstance(device,vim.host.SriovInfo) and device.sriovActive:
+                    if device.numVirtualFunction >= no_of_vfs:
+                        sriovInfo.append(device)
+                        break
+        return sriovInfo
+
+
+    def get_host_and_sriov_devices(self, content, no_of_vfs):
+        """
+         Method to get the details of SRIOV devices infromation on all hosts
+
+            Args:
+                content - vSphere host object
+                no_of_vfs - number of pci VFs needed on host
+
+            Returns:
+                 array of SRIOV devices and host object
+        """
+        host_obj = None
+        sriov_device_objs = None
+        try:
+            if content:
+                container = content.viewManager.CreateContainerView(content.rootFolder,
+                                                            [vim.HostSystem], True)
+                for host in container.view:
+                    devices = self.get_sriov_devices(host, no_of_vfs)
+                    if devices:
+                        host_obj = host
+                        sriov_device_objs = devices
+                        break
+        except Exception as exp:
+            self.logger.error("Error {} occurred while finding SRIOV devices on host: {}".format(exp, host_obj))
+
+        return host_obj,sriov_device_objs
+
+
+    def add_sriov_to_vm(self,content, vm_obj, host_obj, network_name, sriov_device):
+        """Reconfigure *vm_obj* with a new SRIOV ethernet adapter.
+
+        Builds a VirtualDeviceSpec describing a VirtualSriovEthernetCard backed
+        by *sriov_device* and attached to the distributed portgroup named
+        *network_name*, then submits it as a ReconfigVM task.
+
+            Args:
+                content - vCenter content object
+                vm_obj - vSphere vm object to reconfigure
+                host_obj - vSphere host object (currently unused here; kept for
+                           interface symmetry with the caller)
+                network_name - name of distributed virtual portgroup
+                sriov_device - SRIOV device info (its .id backs the adapter)
+
+            Returns:
+                 task object on success, None on any error (logged)
+        """
+        devices = []
+        vnic_label = "sriov nic"
+        try:
+            # resolve the portgroup by key and use its display name from here on
+            dvs_portgr = self.get_dvport_group(network_name)
+            network_name = dvs_portgr.name
+            nic = vim.vm.device.VirtualDeviceSpec()
+            # VM device
+            nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
+            nic.device = vim.vm.device.VirtualSriovEthernetCard()
+            # let vCenter assign the MAC address
+            nic.device.addressType = 'assigned'
+            #nic.device.key = 13016
+            nic.device.deviceInfo = vim.Description()
+            nic.device.deviceInfo.label = vnic_label
+            nic.device.deviceInfo.summary = network_name
+            nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
+
+            nic.device.backing.network = self.get_obj(content, [vim.Network], network_name)
+            nic.device.backing.deviceName = network_name
+            nic.device.backing.useAutoDetect = False
+            # connect the adapter at power-on and allow the guest to toggle it
+            nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
+            nic.device.connectable.startConnected = True
+            nic.device.connectable.allowGuestControl = True
+
+            # bind the virtual adapter to the physical function of the SRIOV device
+            nic.device.sriovBacking = vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
+            nic.device.sriovBacking.physicalFunctionBacking = vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
+            nic.device.sriovBacking.physicalFunctionBacking.id = sriov_device.id
+
+            devices.append(nic)
+            vmconf = vim.vm.ConfigSpec(deviceChange=devices)
+            task = vm_obj.ReconfigVM_Task(vmconf)
+            return task
+        except Exception as exp:
+            self.logger.error("Error {} occurred while adding SRIOV adapter in VM: {}".format(exp, vm_obj))
+            return None
+
+
+    def create_dvPort_group(self, network_name):
+        """
+         Method to create disributed virtual portgroup
+
+            Args:
+                network_name - name of network/portgroup
+
+            Returns:
+                portgroup key
+        """
+        try:
+            new_network_name = [network_name, '-', str(uuid.uuid4())]
+            network_name=''.join(new_network_name)
+            vcenter_conect, content = self.get_vcenter_content()
+
+            dv_switch = self.get_obj(content, [vim.DistributedVirtualSwitch], self.dvs_name)
+            if dv_switch:
+                dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
+                dv_pg_spec.name = network_name
+
+                dv_pg_spec.type = vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
+                dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
+                dv_pg_spec.defaultPortConfig.securityPolicy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
+                dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = vim.BoolPolicy(value=False)
+                dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = vim.BoolPolicy(value=False)
+                dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(value=False)
+
+                task = dv_switch.AddDVPortgroup_Task([dv_pg_spec])
+                self.wait_for_vcenter_task(task, vcenter_conect)
+
+                dvPort_group = self.get_obj(content, [vim.dvs.DistributedVirtualPortgroup], network_name)
+                if dvPort_group:
+                    self.logger.info("Created disributed virtaul port group: {}".format(dvPort_group))
+                    return dvPort_group.key
+            else:
+                self.logger.debug("No disributed virtual switch found with name {}".format(network_name))
+
+        except Exception as exp:
+            self.logger.error("Error occurred while creating disributed virtaul port group {}"\
+                             " : {}".format(network_name, exp))
+        return None
+
+    def reconfig_portgroup(self, content, dvPort_group_name , config_info={}):
+        """
+         Method to reconfigure disributed virtual portgroup
+
+            Args:
+                dvPort_group_name - name of disributed virtual portgroup
+                content - vCenter content object
+                config_info - disributed virtual portgroup configuration
+
+            Returns:
+                task object
+        """
+        try:
+            dvPort_group = self.get_dvport_group(dvPort_group_name)
+            if dvPort_group:
+                dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
+                dv_pg_spec.configVersion = dvPort_group.config.configVersion
+                dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
+                if "vlanID" in config_info:
+                    dv_pg_spec.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
+                    dv_pg_spec.defaultPortConfig.vlan.vlanId = config_info.get('vlanID')
+
+                task = dvPort_group.ReconfigureDVPortgroup_Task(spec=dv_pg_spec)
+                return task
+            else:
+                return None
+        except Exception as exp:
+            self.logger.error("Error occurred while reconfiguraing disributed virtaul port group {}"\
+                             " : {}".format(dvPort_group_name, exp))
+            return None
+
+
+    def destroy_dvport_group(self , dvPort_group_name):
+        """
+         Method to destroy disributed virtual portgroup
+
+            Args:
+                network_name - name of network/portgroup
+
+            Returns:
+                True if portgroup successfully got deleted else false
+        """
+        vcenter_conect, content = self.get_vcenter_content()
+        try:
+            status = None
+            dvPort_group = self.get_dvport_group(dvPort_group_name)
+            if dvPort_group:
+                task = dvPort_group.Destroy_Task()
+                status = self.wait_for_vcenter_task(task, vcenter_conect)
+            return status
+        except vmodl.MethodFault as exp:
+            self.logger.error("Caught vmodl fault {} while deleting disributed virtaul port group {}".format(
+                                                                    exp, dvPort_group_name))
+            return None
+
+
+    def get_dvport_group(self, dvPort_group_name):
+        """
+        Method to get disributed virtual portgroup
+
+            Args:
+                network_name - name of network/portgroup
+
+            Returns:
+                portgroup object
+        """
+        vcenter_conect, content = self.get_vcenter_content()
+        dvPort_group = None
+        try:
+            container = content.viewManager.CreateContainerView(content.rootFolder, [vim.dvs.DistributedVirtualPortgroup], True)
+            for item in container.view:
+                if item.key == dvPort_group_name:
+                    dvPort_group = item
+                    break
+            return dvPort_group
+        except vmodl.MethodFault as exp:
+            self.logger.error("Caught vmodl fault {} for disributed virtaul port group {}".format(
+                                                                            exp, dvPort_group_name))
+            return None
+
+    def get_vlanID_from_dvs_portgr(self, dvPort_group_name):
+        """
+         Method to get disributed virtual portgroup vlanID
+
+            Args:
+                network_name - name of network/portgroup
+
+            Returns:
+                vlan ID
+        """
+        vlanId = None
+        try:
+            dvPort_group = self.get_dvport_group(dvPort_group_name)
+            if dvPort_group:
+                vlanId = dvPort_group.config.defaultPortConfig.vlan.vlanId
+        except vmodl.MethodFault as exp:
+            self.logger.error("Caught vmodl fault {} for disributed virtaul port group {}".format(
+                                                                            exp, dvPort_group_name))
+        return vlanId
+
+
+    def configure_vlanID(self, content, vcenter_conect, dvPort_group_name):
+        """Assign a vlan ID to a distributed portgroup if it has none yet.
+
+        Only acts when the portgroup's current vlan ID is exactly 0 (the
+        freshly-created default); then a free ID is allocated from the
+        configured 'vlanID_range' and applied via reconfig_portgroup.
+
+            Args:
+                content - vCenter content object
+                vcenter_conect - vCenter connection (used to wait for the task)
+                dvPort_group_name - name of network/portgroup
+
+            Returns:
+                None (result is only logged)
+        """
+        vlanID = self.get_vlanID_from_dvs_portgr(dvPort_group_name)
+        if vlanID == 0:
+            #configure vlanID
+            vlanID = self.genrate_vlanID(dvPort_group_name)
+            config = {"vlanID":vlanID}
+            task = self.reconfig_portgroup(content, dvPort_group_name,
+                                    config_info=config)
+            if task:
+                status= self.wait_for_vcenter_task(task, vcenter_conect)
+                if status:
+                    self.logger.info("Reconfigured Port group {} for vlan ID {}".format(
+                                                        dvPort_group_name,vlanID))
+            else:
+                # reconfig_portgroup returned None: portgroup missing or error
+                self.logger.error("Fail reconfigure portgroup {} for vlanID{}".format(
+                                        dvPort_group_name, vlanID))
+
+
+    def genrate_vlanID(self, network_name):
+        """
+         Method to get unused vlanID
+            Args:
+                network_name - name of network/portgroup
+            Returns:
+                vlanID
+        """
+        vlan_id = None
+        used_ids = []
+        if self.config.get('vlanID_range') == None:
+            raise vimconn.vimconnConflictException("You must provide a 'vlanID_range' "\
+                        "at config value before creating sriov network with vlan tag")
+        if "used_vlanIDs" not in self.persistent_info:
+                self.persistent_info["used_vlanIDs"] = {}
+        else:
+            used_ids = list(self.persistent_info["used_vlanIDs"].values())
+
+        for vlanID_range in self.config.get('vlanID_range'):
+            start_vlanid , end_vlanid = vlanID_range.split("-")
+            if start_vlanid > end_vlanid:
+                raise vimconn.vimconnConflictException("Invalid vlan ID range {}".format(
+                                                                        vlanID_range))
+
+            for id in xrange(int(start_vlanid), int(end_vlanid) + 1):
+            #For python3
+            #for id in range(int(start_vlanid), int(end_vlanid) + 1):
+                if id not in used_ids:
+                    vlan_id = id
+                    self.persistent_info["used_vlanIDs"][network_name] = vlan_id
+                    return vlan_id
+        if vlan_id is None:
+            raise vimconn.vimconnConflictException("All Vlan IDs are in use")
+
+
+    def get_obj(self, content, vimtype, name):
+        """
+         Get the vsphere object associated with a given text name
+        """
+        obj = None
+        container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
+        for item in container.view:
+            if item.name == name:
+                obj = item
+                break
+        return obj
+
+
+    def insert_media_to_vm(self, vapp, image_id):
+        """
+        Method to insert media CD-ROM (ISO image) from catalog to vm.
+        vapp - vapp object to get vm id
+        Image_id - image id for cdrom to be inerted to vm
+        """
+        # create connection object
+        vca = self.connect()
+        try:
+            # fetching catalog details
+            rest_url = "{}/api/catalog/{}".format(self.url, image_id)
+            if vca._session:
+                headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
+                response = self.perform_request(req_type='GET',
+                                                url=rest_url,
+                                                headers=headers)
+
+            if response.status_code != 200:
+                self.logger.error("REST call {} failed reason : {}"\
+                             "status code : {}".format(url_rest_call,
+                                                    response.content,
+                                               response.status_code))
+                raise vimconn.vimconnException("insert_media_to_vm(): Failed to get "\
+                                                                    "catalog details")
+            # searching iso name and id
+            iso_name,media_id = self.get_media_details(vca, response.content)
+
+            if iso_name and media_id:
+                data ="""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+                     <ns6:MediaInsertOrEjectParams
+                     xmlns="http://www.vmware.com/vcloud/versions" xmlns:ns2="http://schemas.dmtf.org/ovf/envelope/1" 
+                     xmlns:ns3="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" 
+                     xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/common" 
+                     xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" 
+                     xmlns:ns6="http://www.vmware.com/vcloud/v1.5" 
+                     xmlns:ns7="http://www.vmware.com/schema/ovf" 
+                     xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" 
+                     xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">
+                     <ns6:Media
+                        type="application/vnd.vmware.vcloud.media+xml"
+                        name="{}"
+                        id="urn:vcloud:media:{}"
+                        href="https://{}/api/media/{}"/>
+                     </ns6:MediaInsertOrEjectParams>""".format(iso_name, media_id,
+                                                                self.url,media_id)
+
+                for vms in vapp.get_all_vms():
+                    vm_id = vms.get('id').split(':')[-1]
+
+                    headers['Content-Type'] = 'application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml'
+                    rest_url = "{}/api/vApp/vm-{}/media/action/insertMedia".format(self.url,vm_id)
+
+                    response = self.perform_request(req_type='POST',
+                                                       url=rest_url,
+                                                          data=data,
+                                                    headers=headers)
+
+                    if response.status_code != 202:
+                        error_msg = "insert_media_to_vm() : Failed to insert CD-ROM to vm. Reason {}. " \
+                                    "Status code {}".format(response.text, response.status_code)
+                        self.logger.error(error_msg)
+                        raise vimconn.vimconnException(error_msg)
+                    else:
+                        task = self.get_task_from_response(response.content)
+                        result = self.client.get_task_monitor().wait_for_success(task=task)
+                        if result.get('status') == 'success':
+                            self.logger.info("insert_media_to_vm(): Sucessfully inserted media ISO"\
+                                                                    " image to vm {}".format(vm_id))
+
+        except Exception as exp:
+            self.logger.error("insert_media_to_vm() : exception occurred "\
+                                            "while inserting media CD-ROM")
+            raise vimconn.vimconnException(message=exp)
+
+
+    def get_media_details(self, vca, content):
+        """Resolve the media name and id of a catalog item.
+
+        Collects the CatalogItem hrefs from the catalog XML in *content*,
+        fetches each item, and returns the first Entity whose href refers to a
+        media object.
+
+            vca - connection object (supplies the auth header)
+            content - Catalog details (XML returned by the catalog REST call)
+            Return - (media name, media id) when found; (False, False) when the
+                     first fetched item has no media entity (see NOTE below);
+                     implicitly None when content is empty
+        """
+        cataloghref_list = []
+        try:
+            if content:
+                vm_list_xmlroot = XmlElementTree.fromstring(content)
+                for child in vm_list_xmlroot.iter():
+                    if 'CatalogItem' in child.tag:
+                        cataloghref_list.append(child.attrib.get('href'))
+                # NOTE(review): this is always True for a list (even an empty
+                # one); presumably "if cataloghref_list:" was intended — confirm
+                if cataloghref_list is not None:
+                    for href in cataloghref_list:
+                        if href:
+                            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
+                            response = self.perform_request(req_type='GET',
+                                                                  url=href,
+                                                           headers=headers)
+                            if response.status_code != 200:
+                                self.logger.error("REST call {} failed reason : {}"\
+                                             "status code : {}".format(href,
+                                                           response.content,
+                                                      response.status_code))
+                                raise vimconn.vimconnException("get_media_details : Failed to get "\
+                                                                         "catalogitem details")
+                            list_xmlroot = XmlElementTree.fromstring(response.content)
+                            for child in list_xmlroot.iter():
+                                if 'Entity' in child.tag:
+                                    if 'media' in child.attrib.get('href'):
+                                        name = child.attrib.get('name')
+                                        media_id = child.attrib.get('href').split('/').pop()
+                                        return name,media_id
+                            # NOTE(review): this for/else fires whenever the loop
+                            # above finds no media entity (it never breaks), so the
+                            # remaining hrefs are never examined — confirm intended
+                            else:
+                                self.logger.debug("Media name and id not found")
+                                return False,False
+        except Exception as exp:
+            self.logger.error("get_media_details : exception occurred "\
+                                               "getting media details")
+            raise vimconn.vimconnException(message=exp)
+
+
+    def retry_rest(self, method, url, add_headers=None, data=None):
+        """ Method to get Token & retry respective REST request
+            Args:
+                api - REST API - Can be one of 'GET' or 'PUT' or 'POST'
+                url - request url to be used
+                add_headers - Additional headers (optional)
+                data - Request payload data to be passed in request
+            Returns:
+                response - Response of request
+        """
+        response = None
+
+        #Get token
+        self.get_token()
+
+        if self.client._session:
+                headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+
+        if add_headers:
+            headers.update(add_headers)
+
+        if method == 'GET':
+            response = self.perform_request(req_type='GET',
+                                            url=url,
+                                            headers=headers)
+        elif method == 'PUT':
+            response = self.perform_request(req_type='PUT',
+                                            url=url,
+                                            headers=headers,
+                                            data=data)
+        elif method == 'POST':
+            response = self.perform_request(req_type='POST',
+                                            url=url,
+                                            headers=headers,
+                                            data=data)
+        elif method == 'DELETE':
+            response = self.perform_request(req_type='DELETE',
+                                            url=url,
+                                            headers=headers)
+        return response
+
+
+    def get_token(self):
+        """ Generate a new token if expired
+
+            Returns:
+                The return client object that letter can be used to connect to vCloud director as admin for VDC
+        """
+        try:
+            self.logger.debug("Generate token for vca {} as {} to datacenter {}.".format(self.org_name,
+                                                                                      self.user,
+                                                                                      self.org_name))
+            host = self.url
+            client = Client(host, verify_ssl_certs=False)
+            client.set_highest_supported_version()
+            client.set_credentials(BasicLoginCredentials(self.user, self.org_name, self.passwd))
+            # connection object
+            self.client = client
+
+        except:
+            raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
+                                                     "{} as user: {}".format(self.org_name, self.user))
+
+        if not client:
+            raise vimconn.vimconnConnectionException("Failed while reconnecting vCD")
+
+
+    def get_vdc_details(self):
+        """ Get VDC details using pyVcloud Lib
+
+            Returns org and vdc object
+        """
+        vdc = None
+        try:
+            org = Org(self.client, resource=self.client.get_org())
+            vdc = org.get_vdc(self.tenant_name)
+        except Exception as e:
+            # pyvcloud not giving a specific exception, Refresh nevertheless
+            self.logger.debug("Received exception {}, refreshing token ".format(str(e)))
+
+        #Retry once, if failed by refreshing token
+        if vdc is None:
+            self.get_token()
+            org = Org(self.client, resource=self.client.get_org())
+            vdc = org.get_vdc(self.tenant_name)
+
+        return org, vdc
+
+
+    def perform_request(self, req_type, url, headers=None, data=None):
+        """Perform the POST/PUT/GET/DELETE request."""
+
+        #Log REST request details
+        self.log_request(req_type, url=url, headers=headers, data=data)
+        # perform request and return its result
+        if req_type == 'GET':
+            response = requests.get(url=url,
+                                headers=headers,
+                                verify=False)
+        elif req_type == 'PUT':
+            response = requests.put(url=url,
+                                headers=headers,
+                                data=data,
+                                verify=False)
+        elif req_type == 'POST':
+            response = requests.post(url=url,
+                                 headers=headers,
+                                 data=data,
+                                 verify=False)
+        elif req_type == 'DELETE':
+            response = requests.delete(url=url,
+                                 headers=headers,
+                                 verify=False)
+        #Log the REST response
+        self.log_response(response)
+
+        return response
+
+
+    def log_request(self, req_type, url=None, headers=None, data=None):
+        """Logs REST request details"""
+
+        if req_type is not None:
+            self.logger.debug("Request type: {}".format(req_type))
+
+        if url is not None:
+            self.logger.debug("Request url: {}".format(url))
+
+        if headers is not None:
+            for header in headers:
+                self.logger.debug("Request header: {}: {}".format(header, headers[header]))
+
+        if data is not None:
+            self.logger.debug("Request data: {}".format(data))
+
+
+    def log_response(self, response):
+        """Logs REST response details (currently only the status code)"""
+
+        self.logger.debug("Response status code: {} ".format(response.status_code))
+
+
+    def get_task_from_response(self, content):
+        """
+        content - API response content(response.content)
+        return task object
+        """
+        xmlroot = XmlElementTree.fromstring(content)
+        if xmlroot.tag.split('}')[1] == "Task":
+            return xmlroot
+        else:
+            for ele in xmlroot:
+                if ele.tag.split("}")[1] == "Tasks":
+                    task = ele[0]
+                    break
+            return task
+
+
+    def power_on_vapp(self,vapp_id, vapp_name):
+        """
+        vapp_id - vApp uuid
+        vapp_name - vAapp name
+        return - Task object
+        """
+        headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                   'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+
+        poweron_href = "{}/api/vApp/vapp-{}/power/action/powerOn".format(self.url,
+                                                                          vapp_id)
+        response = self.perform_request(req_type='POST',
+                                       url=poweron_href,
+                                        headers=headers)
+
+        if response.status_code != 202:
+            self.logger.error("REST call {} failed reason : {}"\
+                         "status code : {} ".format(poweron_href,
+                                                response.content,
+                                           response.status_code))
+            raise vimconn.vimconnException("power_on_vapp() : Failed to power on "\
+                                                      "vApp {}".format(vapp_name))
+        else:
+            poweron_task = self.get_task_from_response(response.content)
+            return poweron_task
+
+
diff --git a/RO-VIM-vmware/requirements.txt b/RO-VIM-vmware/requirements.txt
new file mode 100644 (file)
index 0000000..af74bad
--- /dev/null
@@ -0,0 +1,25 @@
+##
+# Copyright VMware Inc.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+PyYAML
+requests
+netaddr
+pyvcloud==19.1.1
+pyvmomi
+progressbar
+prettytable
+# TODO py3 genisoimage
+git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro
diff --git a/RO-VIM-vmware/setup.py b/RO-VIM-vmware/setup.py
new file mode 100644 (file)
index 0000000..193102e
--- /dev/null
@@ -0,0 +1,58 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+##
+# Copyright VMware Inc.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
from setuptools import setup

# Python distribution name of the vmware RO VIM connector plugin
_name = "osm_rovim_vmware"

# Long description shown on the package index page (reStructuredText)
README = """
===========
osm-rovim_vmware
===========

osm-ro pluging for vmware VIM
"""

setup(
    name=_name,
    description='OSM ro vim plugin for vmware',
    long_description=README,
    # The version is derived from the latest "v*" git tag at build time
    # (requires the setuptools-version-command plugin, see setup_requires)
    version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'),
    # version=VERSION,
    # python_requires='>3.5.0',
    author='ETSI OSM',
    # TODO py3 author_email='',
    maintainer='OSM_TECH@LIST.ETSI.ORG',  # TODO py3
    # TODO py3 maintainer_email='',
    url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary',
    license='Apache 2.0',

    packages=[_name],
    include_package_data=True,
    dependency_links=["git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro"],
    install_requires=[
        "pyvcloud==19.1.1", "progressbar", "prettytable", "pyvmomi",
        "requests", "netaddr", "PyYAML",
        "osm-ro",
    ],
    setup_requires=['setuptools-version-command'],
    # Entry point through which RO discovers and loads this VIM connector
    entry_points={
        'osm_rovim.plugins': ['rovim_vmware = osm_rovim_vmware.vimconn_vmware'],
    },
)
diff --git a/RO-VIM-vmware/stdeb.cfg b/RO-VIM-vmware/stdeb.cfg
new file mode 100644 (file)
index 0000000..ff50a2f
--- /dev/null
@@ -0,0 +1,20 @@
+##
+# Copyright VMware Inc.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+[DEFAULT]
+X-Python3-Version : >= 3.5
+Depends3: python3-requests, python3-netaddr, python3-yaml, python3-osm-ro, python3-pip,
+          genisoimage, python3-progressbar, python3-prettytable, python3-pyvmomi
diff --git a/RO-VIM-vmware/tox.ini b/RO-VIM-vmware/tox.ini
new file mode 100644 (file)
index 0000000..448b263
--- /dev/null
@@ -0,0 +1,42 @@
+##
+# Copyright VMware Inc.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+[tox]
+envlist = py3
+toxworkdir={homedir}/.tox
+
+[testenv]
+basepython = python3
+install_command = python3 -m pip install -r requirements.txt -U {opts} {packages}
+# deps = -r{toxinidir}/test-requirements.txt
+commands=python3 -m unittest discover -v
+
+[testenv:flake8]
+basepython = python3
+deps = flake8
+commands = flake8 osm_rovim_vmware --max-line-length 120 \
+    --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504
+
+[testenv:unittest]
+basepython = python3
+commands = python3 -m unittest osm_rovim_vmware.tests
+
+[testenv:build]
+basepython = python3
+deps = stdeb
+       setuptools-version-command
+commands = python3 setup.py --command-packages=stdeb.command bdist_deb
+
diff --git a/RO-client/Makefile b/RO-client/Makefile
new file mode 100644 (file)
index 0000000..e689ad6
--- /dev/null
@@ -0,0 +1,25 @@
+# Copyright 2018 Telefonica S.A.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+all: clean package
+
+clean:
+       rm -rf dist deb_dist osm_roclient-*.tar.gz osm_roclient.egg-info .eggs
+
+package:
+       python3 setup.py --command-packages=stdeb.command sdist_dsc
+       cp debian/python3-osm-roclient.postinst deb_dist/osm-roclient*/debian/
+       cd deb_dist/osm-roclient*/ && dpkg-buildpackage -rfakeroot -uc -us
+
diff --git a/RO-client/README.rst b/RO-client/README.rst
new file mode 100644 (file)
index 0000000..0e9c887
--- /dev/null
@@ -0,0 +1,6 @@
+============
+osm-roclient
+============
+
+osm-roclient is a client to interact with the osm-ro server
+
diff --git a/RO-client/debian/python3-osm-roclient.postinst b/RO-client/debian/python3-osm-roclient.postinst
new file mode 100755 (executable)
index 0000000..27c9044
--- /dev/null
@@ -0,0 +1,25 @@
#!/bin/bash

##
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
##


# Configure bash argument auto-completion (argcomplete) for the RO client.
# Runs as the invoking sudo user so the files land in that user's HOME.
su $SUDO_USER -c 'activate-global-python-argcomplete --user'
# Add the argcomplete hook to .bashrc only if it is not already there
# (keeps the postinst idempotent across reinstalls)
if ! su  $SUDO_USER -c 'grep -q bash_completion.d/python-argcomplete.sh ${HOME}/.bashrc'
then
    echo "    inserting .bash_completion.d/python-argcomplete.sh execution at .bashrc"
    su $SUDO_USER -c 'echo ". ${HOME}/.bash_completion.d/python-argcomplete.sh" >> ~/.bashrc'
fi
+
diff --git a/RO-client/osm_roclient/roclient.py b/RO-client/osm_roclient/roclient.py
new file mode 100755 (executable)
index 0000000..0e6d32c
--- /dev/null
@@ -0,0 +1,2520 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# PYTHON_ARGCOMPLETE_OK
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
"""
openmano client used to interact with openmano-server (openmanod)
"""
__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes"
__date__ = "$09-oct-2014 09:09:48$"
__version__ = "0.5.0"
# Fix: date was malformed ("2019-010-04"); use a valid ISO-8601 date
version_date = "2019-10-04"
+
+from argcomplete.completers import FilesCompleter
+import os
+import argparse
+import argcomplete
+import requests
+import json
+import yaml
+import logging
+#from jsonschema import validate as js_v, exceptions as js_e
+
+
class ArgumentParserError(Exception):
    """Raised by ThrowingArgumentParser instead of exiting on a parse error."""
+
+
class OpenmanoCLIError(Exception):
    """Generic error raised by the openmano CLI helper functions."""
+
+
class ThrowingArgumentParser(argparse.ArgumentParser):
    """ArgumentParser variant that raises ArgumentParserError instead of exiting."""

    def error(self, message):
        # Show a short usage hint and raise so the interactive loop can recover
        print("Error: {}".format(message))
        print()
        self.print_usage()
        # self.print_help()
        print()
        print("Type 'openmano -h' for help")
        raise ArgumentParserError
+
+
def config(args):
    """Print the client configuration: host, port, tenant, datacenter and WIM.

    With the -n flag the tenant/datacenter/WIM names and uuids are resolved by
    querying the openmano server; lookup failures leave the "None" placeholders.
    Reads the module-level settings mano_host, mano_port, mano_tenant,
    mano_datacenter and mano_wim (taken from environment variables).
    """
    print("OPENMANO_HOST: {}".format(mano_host))
    print("OPENMANO_PORT: {}".format(mano_port))
    if args.n:
        logger.debug("resolving tenant and datacenter names")
        # Placeholders printed verbatim when a lookup fails
        mano_tenant_id = "None"
        mano_tenant_name = "None"
        mano_datacenter_id = "None"
        mano_datacenter_name = "None"
        # WIM additions
        logger.debug("resolving WIM names")
        mano_wim_id = "None"
        mano_wim_name = "None"
        try:
            mano_tenant_id = _get_item_uuid("tenants", mano_tenant)
            URLrequest = "http://{}:{}/openmano/tenants/{}".format(mano_host, mano_port, mano_tenant_id)
            mano_response = requests.get(URLrequest)
            logger.debug("openmano response: %s", mano_response.text )
            content = mano_response.json()
            mano_tenant_name = content["tenant"]["name"]
            URLrequest = "http://{}:{}/openmano/{}/datacenters/{}".format(mano_host, mano_port, mano_tenant_id,
                                                                          mano_datacenter)
            mano_response = requests.get(URLrequest)
            logger.debug("openmano response: %s", mano_response.text)
            content = mano_response.json()
            if "error" not in content:
                mano_datacenter_id = content["datacenter"]["uuid"]
                mano_datacenter_name = content["datacenter"]["name"]

            # WIM
            URLrequest = "http://{}:{}/openmano/{}/wims/{}".format(
            mano_host, mano_port, mano_tenant_id, mano_wim)
            mano_response = requests.get(URLrequest)
            logger.debug("openmano response: %s", mano_response.text)
            content = mano_response.json()
            if "error" not in content:
                mano_wim_id = content["wim"]["uuid"]
                mano_wim_name = content["wim"]["name"]

        except OpenmanoCLIError:
            # best-effort resolution: fall through and print the placeholders
            pass
        print( "OPENMANO_TENANT: {}".format(mano_tenant))
        print( "    Id: {}".format(mano_tenant_id))
        print( "    Name: {}".format(mano_tenant_name))
        print( "OPENMANO_DATACENTER: {}".format(mano_datacenter))
        print( "    Id: {}".format(mano_datacenter_id))
        print( "    Name: {}".format(mano_datacenter_name))
        # WIM
        print( "OPENMANO_WIM: {}".format( (mano_wim)))
        print( "    Id: {}".format(mano_wim_id))
        print( "    Name: {}".format(mano_wim_name))

    else:
        print("OPENMANO_TENANT: {}".format(mano_tenant))
        print("OPENMANO_DATACENTER: {}".format(mano_datacenter))
        # WIM
        print("OPENMANO_WIM: {}".format(mano_wim))
+
def _print_verbose(mano_response, verbose_level=0):
    """Print an openmano REST response in tabular form.

    Expects a JSON body shaped as a single-key dict whose value is a string,
    a dict or a list of dicts; anything else is printed raw.
    Returns 0 when the HTTP status is 200, otherwise the status code.
    """
    content = mano_response.json()
    result = 0 if mano_response.status_code == 200 else mano_response.status_code

    if not isinstance(content, dict) or len(content) != 1:
        # print("Non expected format output")
        print(str(content))
        return result

    val = next(iter(content.values()))
    if isinstance(val, str):
        print(val)
        return result
    if isinstance(val, list):
        content_list = val
    elif isinstance(val, dict):
        content_list = [val]
    else:
        # print("Non expected dict/list format output"
        print(str(content))
        return result

    # print(content_list
    if verbose_level is None:
        verbose_level = 0
    if verbose_level >= 3:
        print(yaml.safe_dump(content, indent=4, default_flow_style=False))
        return result

    if mano_response.status_code != 200:
        print(content['error']['description'])
        return result

    uuid = None
    for item in content_list:
        # first matching identifier wins; kept across iterations on purpose
        for id_key in ("uuid", "id", "vim_id"):
            if id_key in item:
                uuid = item[id_key]
                break
        name = item.get('name')
        line = "{:38} {:20}".format(uuid or "", name or "")
        if item.get("status"):
            line += " {:20}".format(item['status'])
        elif "enabled" in item and not item["enabled"]:
            line += " enabled=False".ljust(20)
        if verbose_level >= 1:
            if item.get('created_at'):
                line += " {:20}".format(item['created_at'])
            if item.get('sdn_attached_ports'):
                # line += " " + str(item['sdn_attached_ports']).ljust(20)
                line += "\nsdn_attached_ports:\n" + yaml.safe_dump(item['sdn_attached_ports'],
                                                                   indent=4, default_flow_style=False)
            if verbose_level >= 2:
                sep = '\n'
                if item.get('type'):
                    line += sep + "  Type: {:29}".format(item['type'])
                    sep = ''
                if item.get('description'):
                    line += sep + "  Description: {:20}".format(item['description'])
        print(line)
    return result
+
def parser_json_yaml(file_name):
    """Read file_name and parse its content as YAML or JSON.

    Returns a (True, parsed_content) tuple on success, otherwise
    (False, error_message).
    """
    try:
        with open(file_name, "r") as stream:
            text = stream.read()
    except Exception as e:
        return (False, str(e))

    # Heuristic: the extension decides the format; without one, a TAB
    # character suggests JSON (TABs are forbidden in YAML indentation)
    looks_like_yaml = (file_name[-5:] == '.yaml' or file_name[-4:] == '.yml'
                       or (file_name[-5:] != '.json' and '\t' not in text))
    if looks_like_yaml:
        try:
            parsed = yaml.load(text, Loader=yaml.SafeLoader)
        except yaml.YAMLError as exc:
            error_pos = ""
            if hasattr(exc, 'problem_mark'):
                mark = exc.problem_mark
                error_pos = " at line:{} column:{}".format(mark.line + 1, mark.column + 1)
            return (False, "Error loading file '" + file_name + "' yaml format error" + error_pos)
    else:
        try:
            parsed = json.loads(text)
        except Exception as e:
            return (False, "Error loading file '" + file_name + "' json format error " + str(e))
    return True, parsed
+
def _load_file_or_yaml(content):
    """Load a structure from a file name or from inline yaml/json text.

    'content' can be either a yaml/json file path or a text containing a
    yaml/json document. The file is tried first; if 'content' is not an
    existing file but looks like structured text it is parsed directly.
    Returns the parsed structure, or prints an error and exits the program.
    """
    # Check whether 'content' names an existing file
    if os.path.isfile(content):
        r, payload = parser_json_yaml(content)
        if not r:
            print(payload)
            exit(-1)
    elif "{" in content or ":" in content:
        try:
            # Fix: use the safe loader; the bare yaml.load() call is
            # deprecated and can instantiate arbitrary objects from
            # untrusted text. Also consistent with parser_json_yaml().
            payload = yaml.load(content, Loader=yaml.SafeLoader)
        except yaml.YAMLError as exc:
            error_pos = ""
            if hasattr(exc, 'problem_mark'):
                mark = exc.problem_mark
                error_pos = " at position: ({}:{})".format(mark.line + 1, mark.column + 1)
            print("Error loading yaml/json text" + error_pos)
            exit(-1)
    else:
        print("'{}' is neither a valid file nor a yaml/json content".format(content))
        exit(-1)
    return payload
+
def _get_item_uuid(item, item_name_id, tenant=None):
    """Resolve a name, uuid or "osm_id=<id>" reference into an item uuid.

    Queries the openmano server list endpoint for 'item' (e.g. "tenants",
    "vnfs") and returns the uuid of the single match. Raises OpenmanoCLIError
    when nothing matches or when a name is ambiguous.
    """
    if tenant:
        url = "http://{}:{}/openmano/{}/{}".format(mano_host, mano_port, tenant, item)
    else:
        url = "http://{}:{}/openmano/{}".format(mano_host, mano_port, item)
    mano_response = requests.get(url)
    logger.debug("openmano response: %s", mano_response.text )
    content = mano_response.json()
    # print(content
    matches = []
    for entry in content[item]:
        # an exact uuid match is unambiguous: return immediately
        if entry["uuid"] == item_name_id:
            return item_name_id
        if entry["name"] == item_name_id:
            matches.append(entry["uuid"])
        if item_name_id.startswith("osm_id=") and entry.get("osm_id") == item_name_id[7:]:
            matches.append(entry["uuid"])
    if not matches:
        raise OpenmanoCLIError("No {} found with name/uuid '{}'".format(item[:-1], item_name_id))
    if len(matches) > 1:
        raise OpenmanoCLIError("{} {} found with name '{}'. uuid must be used".format(len(matches), item,
                                                                                      item_name_id))
    return matches[0]
+#
+# def check_valid_uuid(uuid):
+#     id_schema = {"type" : "string", "pattern": "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$"}
+#     try:
+#         js_v(uuid, id_schema)
+#         return True
+#     except js_e.ValidationError:
+#         return False
+
def _get_tenant(tenant_name_id=None):
    """Return the uuid of the given tenant, defaulting to OPENMANO_TENANT."""
    if not tenant_name_id:
        if not mano_tenant:
            raise OpenmanoCLIError("'OPENMANO_TENANT' environment variable is not set")
        tenant_name_id = mano_tenant
    return _get_item_uuid("tenants", tenant_name_id)
+
def _get_datacenter(datacenter_name_id=None, tenant="any"):
    """Return the uuid of the given datacenter, defaulting to OPENMANO_DATACENTER."""
    name_or_id = datacenter_name_id or mano_datacenter
    if not name_or_id:
        raise OpenmanoCLIError("neither 'OPENMANO_DATACENTER' environment variable is set nor --datacenter option is used")
    return _get_item_uuid("datacenters", name_or_id, tenant)
+
+# WIM
def _get_wim(wim_name_id=None, tenant="any"):
    """Return the uuid of the given WIM, defaulting to OPENMANO_WIM."""
    name_or_id = wim_name_id or mano_wim
    if not name_or_id:
        raise OpenmanoCLIError("neither 'OPENMANO_WIM' environment variable is set nor --wim option is used")
    return _get_item_uuid("wims", name_or_id, tenant)
+
def vnf_create(args):
    """Upload a VNF descriptor (args.file) to the openmano server.

    Supports both the OSM IM v3 catalog layout ("vnfd:vnfd-catalog") and the
    older proprietary "vnf" layout. Command-line options --name,
    --description, --image-path, --image-name and --image-checksum override
    the corresponding descriptor fields (comma-separated lists map to VDUs
    by position). Returns the _print_verbose() result, or -1 on a
    malformed descriptor.
    """
    # print("vnf-create", args)
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
    tenant = _get_tenant()
    myvnf = _load_file_or_yaml(args.file)

    api_version = ""
    if "vnfd:vnfd-catalog" in myvnf or "vnfd-catalog" in myvnf:
        # OSM IM (API v3) descriptor: keys may appear with or without prefix
        api_version = "/v3"
        token = "vnfd"
        vnfd_catalog = myvnf.get("vnfd:vnfd-catalog")
        if not vnfd_catalog:
            vnfd_catalog = myvnf.get("vnfd-catalog")
        vnfds = vnfd_catalog.get("vnfd:vnfd")
        if not vnfds:
            vnfds = vnfd_catalog.get("vnfd")
        vnfd = vnfds[0]
        vdu_list = vnfd.get("vdu")

    else:  # old API
        api_version = ""
        token = "vnfs"
        vnfd = myvnf['vnf']
        vdu_list = vnfd.get("VNFC")

    if args.name or args.description or args.image_path or args.image_name or args.image_checksum:
        # TODO, change this for API v3
        # print(args.name
        try:
            if args.name:
                vnfd['name'] = args.name
            if args.description:
                vnfd['description'] = args.description
            if vdu_list:
                if args.image_path:
                    # one comma-separated path per VDU, matched by position
                    index = 0
                    for image_path_ in args.image_path.split(","):
                        # print("image-path", image_path_)
                        if api_version == "/v3":
                            if vdu_list[index].get("image"):
                                vdu_list[index]['image'] = image_path_
                                if "image-checksum" in vdu_list[index]:
                                    del vdu_list[index]["image-checksum"]
                            else:  # image name in volumes
                                vdu_list[index]["volumes"][0]["image"] = image_path_
                                if "image-checksum" in vdu_list[index]["volumes"][0]:
                                    del vdu_list[index]["volumes"][0]["image-checksum"]
                        else:
                            vdu_list[index]['VNFC image'] = image_path_
                            if "image name" in vdu_list[index]:
                                del vdu_list[index]["image name"]
                            if "image checksum" in vdu_list[index]:
                                del vdu_list[index]["image checksum"]
                        index += 1
                if args.image_name:  # image name precedes if both are supplied
                    index = 0
                    for image_name_ in args.image_name.split(","):
                        if api_version == "/v3":
                            if vdu_list[index].get("image"):
                                vdu_list[index]['image'] = image_name_
                                if "image-checksum" in vdu_list[index]:
                                    del vdu_list[index]["image-checksum"]
                                if vdu_list[index].get("alternative-images"):
                                    for a_image in vdu_list[index]["alternative-images"]:
                                        a_image['image'] = image_name_
                                        if "image-checksum" in a_image:
                                            del a_image["image-checksum"]
                            else:  # image name in volumes
                                vdu_list[index]["volumes"][0]["image"] = image_name_
                                if "image-checksum" in vdu_list[index]["volumes"][0]:
                                    del vdu_list[index]["volumes"][0]["image-checksum"]
                        else:
                            vdu_list[index]['image name'] = image_name_
                            if "VNFC image" in vdu_list[index]:
                                del vdu_list[index]["VNFC image"]
                        index += 1
                if args.image_checksum:
                    index = 0
                    for image_checksum_ in args.image_checksum.split(","):
                        if api_version == "/v3":
                            if vdu_list[index].get("image"):
                                vdu_list[index]['image-checksum'] = image_checksum_
                                if vdu_list[index].get("alternative-images"):
                                    for a_image in vdu_list[index]["alternative-images"]:
                                        a_image['image-checksum'] = image_checksum_
                            else:  # image name in volumes
                                vdu_list[index]["volumes"][0]["image-checksum"] = image_checksum_
                        else:
                            vdu_list[index]['image checksum'] = image_checksum_
                        index += 1
        # map the raised KeyError/TypeError onto a human-readable hint
        # NOTE(review): 'index' may be unbound here when none of the image
        # options was given (potential NameError in this handler) — confirm
        except (KeyError, TypeError) as e:
            if str(e) == 'vnf':           error_pos= "missing field 'vnf'"
            elif str(e) == 'name':        error_pos= "missing field  'vnf':'name'"
            elif str(e) == 'description': error_pos= "missing field  'vnf':'description'"
            elif str(e) == 'VNFC':        error_pos= "missing field  'vnf':'VNFC'"
            elif str(e) == str(index):    error_pos= "field  'vnf':'VNFC' must be an array"
            elif str(e) == 'VNFC image':  error_pos= "missing field 'vnf':'VNFC'['VNFC image']"
            elif str(e) == 'image name':  error_pos= "missing field 'vnf':'VNFC'['image name']"
            elif str(e) == 'image checksum':  error_pos= "missing field 'vnf':'VNFC'['image checksum']"
            else:                       error_pos="wrong format"
            print("Wrong VNF descriptor: " + error_pos)
            return -1
    payload_req = json.dumps(myvnf)

    # print(payload_req

    URLrequest = "http://{}:{}/openmano{}/{}/{token}".format(mano_host, mano_port, api_version, tenant, token=token)
    logger.debug("openmano request: %s", payload_req)
    mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
    logger.debug("openmano response: %s", mano_response.text )

    return _print_verbose(mano_response, args.verbose)
+
def vnf_list(args):
    """List the VNFs of the current tenant (or all tenants with --all).

    With args.name a single VNF is shown in detail (VMs, internal nets,
    external interfaces); higher args.verbose levels add fields or dump the
    whole response as yaml. Returns 0 on success, 404 when the list is
    empty, otherwise the HTTP status code.
    """
    # print("vnf-list",args
    if args.all:
        tenant = "any"
    else:
        tenant = _get_tenant()
    if args.name:
        toshow = _get_item_uuid("vnfs", args.name, tenant)
        URLrequest = "http://{}:{}/openmano/{}/vnfs/{}".format(mano_host, mano_port, tenant, toshow)
    else:
        URLrequest = "http://{}:{}/openmano/{}/vnfs".format(mano_host, mano_port, tenant)
    mano_response = requests.get(URLrequest)
    logger.debug("openmano response: %s", mano_response.text )
    content = mano_response.json()
    # print(json.dumps(content, indent=4)
    if args.verbose==None:
        args.verbose=0
    result = 0 if mano_response.status_code==200 else mano_response.status_code
    if mano_response.status_code == 200:
        if not args.name:
            if args.verbose >= 3:
                print(yaml.safe_dump(content, indent=4, default_flow_style=False))
                return result
            if len(content['vnfs']) == 0:
                print("No VNFs were found.")
                return 404   # HTTP_Not_Found
            for vnf in content['vnfs']:
                myoutput = "{:38} {:20}".format(vnf['uuid'], vnf['name'])
                if vnf.get('osm_id') or args.verbose >= 1:
                    myoutput += " osm_id={:20}".format(vnf.get('osm_id'))
                if args.verbose >= 1:
                    myoutput += " {}".format(vnf['created_at'])
                print(myoutput)
                if args.verbose >= 2:
                    print("  Description: {}".format(vnf['description']))
                    # print("  VNF descriptor file: {}".format(vnf['path']))
        else:
            # single-VNF detailed view
            if args.verbose:
                print(yaml.safe_dump(content, indent=4, default_flow_style=False))
                return result
            vnf = content['vnf']
            print("{:38} {:20} osm_id={:20} {:20}".format(vnf['uuid'], vnf['name'], vnf.get('osm_id'),
                                                          vnf['created_at']), end=" ")
            print("  Description: {}".format(vnf['description']))
            # print(" VNF descriptor file: {}".format(vnf['path']))
            print("  VMs:")
            for vm in vnf['VNFC']:
                print("    {:20} osm_id={:20} {}".format(vm['name'], vm.get('osm_id'), vm['description']))
            if len(vnf['nets']) > 0:
                print("  Internal nets:")
                for net in vnf['nets']:
                    print("    {:20} {}".format(net['name'], net['description']))
            if len(vnf['external-connections']) > 0:
                print("  External interfaces:")
                for interface in vnf['external-connections']:
                    print("    {:20} {:20} {:20} {:14}".format(
                        interface['external_name'], interface['vm_name'],
                        interface['internal_name'],
                        interface.get('vpci') if interface.get('vpci') else ""))
    else:
        print(content['error']['description'])
        if args.verbose:
            print(yaml.safe_dump(content, indent=4, default_flow_style=False))
    return result
+
def vnf_delete(args):
    """Delete a VNF from the catalogue, asking for confirmation unless --force."""
    # print("vnf-delete",args
    tenant = "any" if args.all else _get_tenant()
    todelete = _get_item_uuid("vnfs", args.name, tenant=tenant)
    if not args.force:
        answer = input("Delete VNF {} (y/N)? ".format(todelete))
        # anything not starting with y/Y aborts the deletion
        if not answer or answer[0].lower() != "y":
            return 0
    url = "http://{}:{}/openmano/{}/vnfs/{}".format(mano_host, mano_port, tenant, todelete)
    mano_response = requests.delete(url)
    logger.debug("openmano response: %s", mano_response.text )
    result = 0 if mano_response.status_code == 200 else mano_response.status_code
    content = mano_response.json()
    # print(json.dumps(content, indent=4))
    if mano_response.status_code == 200:
        print(content['result'])
    else:
        print(content['error']['description'])
    return result
+
def scenario_create(args):
    """Upload a scenario/NSD descriptor (args.file) to the openmano server."""
    # print("scenario-create",args
    tenant = _get_tenant()
    headers_req = {'content-type': 'application/yaml'}
    myscenario = _load_file_or_yaml(args.file)

    if "nsd:nsd-catalog" in myscenario or "nsd-catalog" in myscenario:
        # OSM IM (API v3) descriptor: keys may appear with or without prefix
        api_version = "/v3"
        token = "nsd"
        nsd_catalog = myscenario.get("nsd:nsd-catalog") or myscenario.get("nsd-catalog")
        nsds = nsd_catalog.get("nsd:nsd") or nsd_catalog.get("nsd")
        nsd = nsds[0]
    else:  # API<v3
        api_version = ""
        token = "scenarios"
        nsd = myscenario["scenario"] if "scenario" in myscenario else myscenario

    # TODO modify for API v3
    if args.name:
        nsd['name'] = args.name
    if args.description:
        nsd['description'] = args.description
    payload_req = yaml.safe_dump(myscenario, explicit_start=True, indent=4, default_flow_style=False, tags=False,
                                 allow_unicode=True)

    URLrequest = "http://{host}:{port}/openmano{api}/{tenant}/{token}".format(
        host=mano_host, port=mano_port, api=api_version, tenant=tenant, token=token)
    logger.debug("openmano request: %s", payload_req)
    mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
    logger.debug("openmano response: %s", mano_response.text )
    return _print_verbose(mano_response, args.verbose)
+
def scenario_list(args):
    """List scenarios, or show one in detail when args.name is given.

    With --all the query spans every tenant ("any"); otherwise the configured
    tenant is used. Returns 0 on success, 404 when the listing is empty, or
    the HTTP status code on error.
    """
    if args.all:
        tenant = "any"
    else:
        tenant = _get_tenant()
    if args.name:
        toshow = _get_item_uuid("scenarios", args.name, tenant)
        URLrequest = "http://{}:{}/openmano/{}/scenarios/{}".format(mano_host, mano_port, tenant, toshow)
    else:
        URLrequest = "http://{}:{}/openmano/{}/scenarios".format(mano_host, mano_port, tenant)
    mano_response = requests.get(URLrequest)
    logger.debug("openmano response: %s", mano_response.text)
    content = mano_response.json()
    if args.verbose is None:
        args.verbose = 0

    result = 0 if mano_response.status_code == 200 else mano_response.status_code
    if mano_response.status_code == 200:
        if not args.name:
            if args.verbose >= 3:
                print(yaml.safe_dump(content, indent=4, default_flow_style=False))
                return result
            if len(content['scenarios']) == 0:
                print("No scenarios were found.")
                return 404  # HTTP_Not_Found
            for scenario in content['scenarios']:
                myoutput = "{:38} {:20}".format(scenario['uuid'], scenario['name'])
                if scenario.get('osm_id') or args.verbose >= 1:
                    # BUG FIX: osm_id may be absent/None and "{:20}".format(None)
                    # raises TypeError (None rejects width specs); print "" instead
                    myoutput += " osm_id={:20}".format(scenario.get('osm_id') or "")
                if args.verbose >= 1:
                    myoutput += " {}".format(scenario['created_at'])
                print(myoutput)
                if args.verbose >= 2:
                    print("  Description: {}".format(scenario['description']))
        else:
            if args.verbose:
                print(yaml.safe_dump(content, indent=4, default_flow_style=False))
                return result
            scenario = content['scenario']
            # same None-safe handling of osm_id as in the listing branch above
            print("{:38} {:20} osm_id={:20} {:20}".format(scenario['uuid'], scenario['name'],
                                                          scenario.get('osm_id') or "",
                                                          scenario['created_at']), end=" ")
            print("  Description: {}".format(scenario['description']))
            print("  VNFs:")
            for vnf in scenario['vnfs']:
                print("    {:38} {:20} vnf_index={} {}".format(vnf['vnf_id'], vnf['name'], vnf.get("member_vnf_index"),
                                                               vnf['description']))
            if len(scenario['nets']) > 0:
                print("  nets:")
                for net in scenario['nets']:
                    description = net['description']
                    if not description:  # show "-" for nets without a description
                        description = '-'
                    vim_id = ""
                    if net.get('vim_id'):
                        vim_id = " vim_id=" + net["vim_id"]
                    external = ""
                    if net["external"]:
                        external = " external"
                    print("    {:20} {:38} {:30}{}{}".format(net['name'], net['uuid'], description, vim_id, external))
    else:
        print(content['error']['description'])
        if args.verbose:
            print(yaml.safe_dump(content, indent=4, default_flow_style=False))
    return result
+
def scenario_delete(args):
    """Delete a scenario by name/uuid, asking for confirmation unless --force.

    With --all the scenario is searched across every tenant. Returns 0 on
    success (or when the user declines), or the HTTP status code on error.
    """
    tenant = "any" if args.all else _get_tenant()
    todelete = _get_item_uuid("scenarios", args.name, tenant=tenant)
    if not args.force:
        answer = input("Delete scenario {} (y/N)? ".format(args.name))
        if not answer or answer[0].lower() != "y":
            return 0
    URLrequest = "http://{}:{}/openmano/{}/scenarios/{}".format(mano_host, mano_port, tenant, todelete)
    mano_response = requests.delete(URLrequest)
    logger.debug("openmano response: %s", mano_response.text)
    content = mano_response.json()
    if mano_response.status_code == 200:
        print(content['result'])
        return 0
    print(content['error']['description'])
    return mano_response.status_code
+
def scenario_deploy(args):
    """Deprecated alias for 'instance-scenario-create'; delegates to instance_create()."""
    print("This command is deprecated, use 'openmano instance-scenario-create --scenario {} --name {}' instead!!!".format(args.scenario, args.name))
    print()
    # blank out the options instance_create() reads but this command lacks
    for attr in ("file", "netmap_use", "netmap_create", "keypair", "keypair_auto"):
        setattr(args, attr, None)
    return instance_create(args)
+
+#     # print("scenario-deploy",args
+#     headers_req = {'content-type': 'application/json'}
+#     action = {}
+#     actionCmd="start"
+#     if args.nostart:
+#         actionCmd="reserve"
+#     action[actionCmd] = {}
+#     action[actionCmd]["instance_name"] = args.name
+#     if args.datacenter != None:
+#         action[actionCmd]["datacenter"] = args.datacenter
+#     elif mano_datacenter != None:
+#         action[actionCmd]["datacenter"] = mano_datacenter
+#
+#     if args.description:
+#         action[actionCmd]["description"] = args.description
+#     payload_req = json.dumps(action, indent=4)
+#     # print(payload_req
+#
+#     URLrequest = "http://{}:{}/openmano/{}/scenarios/{}/action".format(mano_host, mano_port, mano_tenant, args.scenario)
+#     logger.debug("openmano request: %s", payload_req)
+#     mano_response = requests.post(URLrequest, headers = headers_req, data=payload_req)
+#     logger.debug("openmano response: %s", mano_response.text )
+#     if args.verbose==None:
+#         args.verbose=0
+#
+#     result = 0 if mano_response.status_code==200 else mano_response.status_code
+#     content = mano_response.json()
+#     # print(json.dumps(content, indent=4))
+#     if args.verbose >= 3:
+#         print(yaml.safe_dump(content, indent=4, default_flow_style=False))
+#         return result
+#
+#     if mano_response.status_code == 200:
+#         myoutput = "{} {}".format(content['uuid'].ljust(38),content['name'].ljust(20))
+#         if args.verbose >=1:
+#             myoutput = "{} {}".format(myoutput, content['created_at'].ljust(20))
+#         if args.verbose >=2:
+#             myoutput = "{} {} {}".format(myoutput, content['description'].ljust(30))
+#         print(myoutput)
+#         print("")
+#         print("To check the status, run the following command:")
#         print("openmano instance-scenario-list <instance_id>")
+#     else:
+#         print(content['error']['description'])
+#     return result
+
def scenario_verify(args):
    """Ask openmano to verify a scenario via its 'verify' action endpoint.

    Returns 0 on success or the HTTP status code on error.
    """
    tenant = _get_tenant()
    headers_req = {'content-type': 'application/json'}
    action = {"verify": {"instance_name": "scen-verify-return5"}}
    payload_req = json.dumps(action, indent=4)

    URLrequest = "http://{}:{}/openmano/{}/scenarios/{}/action".format(mano_host, mano_port, tenant, args.scenario)
    logger.debug("openmano request: %s", payload_req)
    mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
    logger.debug("openmano response: %s", mano_response.text)

    content = mano_response.json()
    if mano_response.status_code == 200:
        print(content['result'])
        return 0
    print(content['error']['description'])
    return mano_response.status_code
+
def instance_create(args):
    """Create (deploy) a scenario instance in openmano.

    The instance descriptor is assembled from an optional yaml/json file
    (--file) and then overridden field by field with CLI options: --name,
    --description, --nostart, --datacenter, --scenario, --netmap-use,
    --netmap-create, --keypair and --keypair-auto.

    Returns 0 on success, -1 when no scenario is given, 1 when --keypair-auto
    cannot collect any public ssh key, None on bad netmap syntax, or the HTTP
    status code on server error.
    """
    tenant = _get_tenant()
    headers_req = {'content-type': 'application/yaml'}
    myInstance={"instance": {}, "schema_version": "0.1"}
    if args.file:
        instance_dict = _load_file_or_yaml(args.file)
        # the file may contain either a bare instance or a full descriptor
        # already wrapped in an "instance" key
        if "instance" not in instance_dict:
            myInstance = {"instance": instance_dict, "schema_version": "0.1"}
        else:
            myInstance = instance_dict
    if args.name:
        myInstance["instance"]['name'] = args.name
    if args.description:
        myInstance["instance"]['description'] = args.description
    if args.nostart:
        myInstance["instance"]['action'] = "reserve"
    # datacenter: CLI option overrides the one in the descriptor
    datacenter = myInstance["instance"].get("datacenter")
    if args.datacenter != None:
        datacenter = args.datacenter
    myInstance["instance"]["datacenter"] = _get_datacenter(datacenter, tenant)
    # scenario: CLI option overrides the one in the descriptor; it is mandatory
    scenario = myInstance["instance"].get("scenario")
    if args.scenario != None:
        scenario = args.scenario
    if not scenario:
        print("you must provide a scenario in the file descriptor or with --scenario")
        return -1
    if isinstance(scenario, str):
        myInstance["instance"]["scenario"] = _get_item_uuid("scenarios", scenario, tenant)
    if args.netmap_use:
        # map scenario nets onto existing datacenter nets; each option value is
        # a comma-separated list of "net-scenario=net-datacenter" pairs
        if "networks" not in myInstance["instance"]:
            myInstance["instance"]["networks"] = {}
        for net in args.netmap_use:
            net_comma_list = net.split(",")
            for net_comma in net_comma_list:
                net_tuple = net_comma.split("=")
                if len(net_tuple) != 2:
                    print("error at netmap-use. Expected net-scenario=net-datacenter. ({})?".format(net_comma))
                    return
                net_scenario   = net_tuple[0].strip()
                net_datacenter = net_tuple[1].strip()
                if net_scenario not in myInstance["instance"]["networks"]:
                    myInstance["instance"]["networks"][net_scenario] = {}
                if "sites" not in myInstance["instance"]["networks"][net_scenario]:
                    myInstance["instance"]["networks"][net_scenario]["sites"] = [ {} ]
                myInstance["instance"]["networks"][net_scenario]["sites"][0]["netmap-use"] = net_datacenter
    if args.netmap_create:
        # ask the datacenter to create nets for scenario nets; each item is
        # "net-scenario[=net-datacenter]" (datacenter net name optional)
        if "networks" not in myInstance["instance"]:
            myInstance["instance"]["networks"] = {}
        for net in args.netmap_create:
            net_comma_list = net.split(",")
            for net_comma in net_comma_list:
                net_tuple = net_comma.split("=")
                if len(net_tuple) == 1:
                    net_scenario   = net_tuple[0].strip()
                    net_datacenter = None
                elif len(net_tuple) == 2:
                    net_scenario   = net_tuple[0].strip()
                    net_datacenter = net_tuple[1].strip()
                else:
                    print("error at netmap-create. Expected net-scenario=net-datacenter or net-scenario. ({})?".format(
                        net_comma))
                    return
                if net_scenario not in myInstance["instance"]["networks"]:
                    myInstance["instance"]["networks"][net_scenario] = {}
                if "sites" not in myInstance["instance"]["networks"][net_scenario]:
                    myInstance["instance"]["networks"][net_scenario]["sites"] = [ {} ]
                myInstance["instance"]["networks"][net_scenario]["sites"][0]["netmap-create"] = net_datacenter
    if args.keypair:
        # each --keypair value is either a bare public key, or "user:key1,key2"
        if "cloud-config" not in myInstance["instance"]:
            myInstance["instance"]["cloud-config"] = {}
        cloud_config = myInstance["instance"]["cloud-config"]
        for key in args.keypair:
            index = key.find(":")
            if index<0:
                if "key-pairs" not in cloud_config:
                    cloud_config["key-pairs"] = []
                cloud_config["key-pairs"].append(key)
            else:
                user = key[:index]
                key_ = key[index+1:]
                key_list = key_.split(",")
                if "users" not in cloud_config:
                    cloud_config["users"] = []
                cloud_config["users"].append({"name": user, "key-pairs": key_list  })
    if args.keypair_auto:
        # collect every *.pub file under $HOME/.ssh and inject them for $USER
        try:
            keys=[]
            home = os.getenv("HOME")
            user = os.getenv("USER")
            files = os.listdir(home+'/.ssh')
            for file in files:
                if file[-4:] == ".pub":
                    with open(home+'/.ssh/'+file, 'r') as f:
                        keys.append(f.read())
            if not keys:
                print("Cannot obtain any public ssh key from '{}'. Try not using --keymap-auto".format(home+'/.ssh'))
                return 1
        except Exception as e:
            print("Cannot obtain any public ssh key. Error '{}'. Try not using --keymap-auto".format(str(e)))
            return 1

        if "cloud-config" not in myInstance["instance"]:
            myInstance["instance"]["cloud-config"] = {}
        cloud_config = myInstance["instance"]["cloud-config"]
        if "key-pairs" not in cloud_config:
            cloud_config["key-pairs"] = []
        if user:
            if "users" not in cloud_config:
                cloud_config["users"] = []
            cloud_config["users"].append({"name": user, "key-pairs": keys })

    payload_req = yaml.safe_dump(myInstance, explicit_start=True, indent=4, default_flow_style=False, tags=False,
                                 allow_unicode=True)
    logger.debug("openmano request: %s", payload_req)
    URLrequest = "http://{}:{}/openmano/{}/instances".format(mano_host, mano_port, tenant)
    mano_response = requests.post(URLrequest, headers = headers_req, data=payload_req)
    logger.debug("openmano response: %s", mano_response.text )
    if args.verbose==None:
        args.verbose=0

    result = 0 if mano_response.status_code==200 else mano_response.status_code
    content = mano_response.json()
    # print(json.dumps(content, indent=4))
    if args.verbose >= 3:
        print(yaml.safe_dump(content, indent=4, default_flow_style=False))
        return result

    if mano_response.status_code == 200:
        myoutput = "{:38} {:20}".format(content['uuid'], content['name'])
        if args.verbose >=1:
            myoutput = "{} {:20}".format(myoutput, content['created_at'])
        if args.verbose >=2:
            myoutput = "{} {:30}".format(myoutput, content['description'])
        print(myoutput)
    else:
        print(content['error']['description'])
    return result
+
def instance_scenario_list(args):
    """List scenario instances, or show one in detail when args.name is given.

    With --all the query spans every tenant ("any"). Verbosity (repeatable -v)
    adds creation time, description, and finally the raw yaml dump. Returns 0
    on success or the HTTP status code on error.
    """
    if args.all:
        tenant = "any"
    else:
        tenant = _get_tenant()
    if args.name:
        toshow = _get_item_uuid("instances", args.name, tenant)
        URLrequest = "http://{}:{}/openmano/{}/instances/{}".format(mano_host, mano_port, tenant, toshow)
    else:
        URLrequest = "http://{}:{}/openmano/{}/instances".format(mano_host, mano_port, tenant)
    mano_response = requests.get(URLrequest)
    logger.debug("openmano response: %s", mano_response.text )
    content = mano_response.json()
    if args.verbose==None:
        args.verbose=0

    result = 0 if mano_response.status_code==200 else mano_response.status_code
    if mano_response.status_code == 200:
        if not args.name:
            # listing: one line per instance, more columns with more -v
            if args.verbose >= 3:
                print(yaml.safe_dump(content, indent=4, default_flow_style=False))
                return result
            if len(content['instances']) == 0:
                print("No scenario instances were found.")
                return result
            for instance in content['instances']:
                myoutput = "{:38} {:20}".format(instance['uuid'], instance['name'])
                if args.verbose >=1:
                    myoutput = "{} {:20}".format(myoutput, instance['created_at'])
                print(myoutput)
                if args.verbose >=2:
                    print("Description: {}".format(instance['description']))
        else:
            # detail view of a single instance: header, VNFs, nets, VMs
            if args.verbose:
                print(yaml.safe_dump(content, indent=4, default_flow_style=False))
                return result
            instance = content
            print("{:38} {:20} {:20}".format(instance['uuid'],instance['name'],instance['created_at']))
            print("Description: {}".format(instance['description']))
            print("Template scenario id: {}".format(instance['scenario_id']))
            print("Template scenario name: {}".format(instance['scenario_name']))
            print("---------------------------------------")
            print("VNF instances: {}".format(len(instance['vnfs'])))
            for vnf in instance['vnfs']:
                print("    {:38} {:20} Template vnf id: {:38}".format(vnf['uuid'], vnf['vnf_name'], vnf['vnf_id']))
            if len(instance['nets'])>0:
                # nets with created=True were built for this instance; the
                # rest are pre-existing (external) datacenter nets
                print("---------------------------------------")
                print("Internal nets:")
                for net in instance['nets']:
                    if net['created']:
                        print("    {:38} {:12} VIM ID: {}".format(net['uuid'], net['status'], net['vim_net_id']))
                print("---------------------------------------")
                print("External nets:")
                for net in instance['nets']:
                    if not net['created']:
                        print("    {:38} {:12} VIM ID: {}".format(net['uuid'], net['status'], net['vim_net_id']))
            print("---------------------------------------")
            print("VM instances:")
            for vnf in instance['vnfs']:
                for vm in vnf['vms']:
                    print("    {:38} {:20} {:20} {:12} VIM ID: {}".format(vm['uuid'], vnf['vnf_name'], vm['name'],
                                                                          vm['status'], vm['vim_vm_id']))
    else:
        print(content['error']['description'])
        if args.verbose:
            print(yaml.safe_dump(content, indent=4, default_flow_style=False))
    return result
+
def instance_scenario_status(args):
    """Placeholder command: not implemented; prints its name and returns 0."""
    print("instance-scenario-status")
    return 0
+
def instance_scenario_delete(args):
    """Delete a scenario instance by name/uuid, confirming unless --force.

    With --all the instance is searched across every tenant. Returns 0 on
    success (or when the user declines), or the HTTP status code on error.
    """
    if args.all:
        tenant = "any"
    else:
        tenant = _get_tenant()
    todelete = _get_item_uuid("instances", args.name, tenant=tenant)
    if not args.force:
        r = input("Delete scenario instance {} (y/N)? ".format(args.name))
        if not (len(r) > 0 and r[0].lower() == "y"):
            # FIX: return 0 instead of a bare return (None), consistent with
            # scenario_delete/tenant_delete/datacenter_delete result codes
            return 0
    URLrequest = "http://{}:{}/openmano/{}/instances/{}".format(mano_host, mano_port, tenant, todelete)
    mano_response = requests.delete(URLrequest)
    logger.debug("openmano response: %s", mano_response.text)
    result = 0 if mano_response.status_code == 200 else mano_response.status_code
    content = mano_response.json()
    if mano_response.status_code == 200:
        print(content['result'])
    else:
        print(content['error']['description'])
    return result
+
def get_action(args):
    """Show instance actions; tenant/instance default to "any" when omitted.

    Giving an explicit action id (--id) bumps the verbosity by one.
    """
    tenant = "any" if args.all else _get_tenant()
    instance_id = args.instance if args.instance else "any"
    action_id = "/" + args.id if args.id else ""
    URLrequest = "http://{}:{}/openmano/{}/instances/{}/action{}".format(mano_host, mano_port, tenant, instance_id,
                                                                         action_id)
    mano_response = requests.get(URLrequest)
    logger.debug("openmano response: %s", mano_response.text)
    if args.verbose is None:
        args.verbose = 0
    if args.id is not None:
        args.verbose += 1
    return _print_verbose(mano_response, args.verbose)
+
def instance_scenario_action(args):
    """POST an action (args.action with yaml args.param) on a scenario instance.

    Optionally restricted to given VNFs (--vnf) and/or VMs (--vm). Returns 0
    on success or the HTTP status code on error.
    """
    tenant = _get_tenant()
    toact = _get_item_uuid("instances", args.name, tenant=tenant)
    action = {args.action: yaml.safe_load(args.param)}
    if args.vnf:
        action["vnfs"] = args.vnf
    if args.vm:
        action["vms"] = args.vm

    headers_req = {'content-type': 'application/json'}
    payload_req = json.dumps(action, indent=4)
    URLrequest = "http://{}:{}/openmano/{}/instances/{}/action".format(mano_host, mano_port, tenant, toact)
    logger.debug("openmano request: %s", payload_req)
    mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
    logger.debug("openmano response: %s", mano_response.text)
    result = 0 if mano_response.status_code == 200 else mano_response.status_code
    content = mano_response.json()
    if mano_response.status_code == 200:
        if args.verbose:
            print(yaml.safe_dump(content, indent=4, default_flow_style=False))
            return result
        if "instance_action_id" in content:
            print("instance_action_id={}".format(content["instance_action_id"]))
        else:
            for uuid, item in content.items():
                print("{:38} {:20} {:20}".format(uuid, item.get('name'), item.get('description')))
    else:
        print(content['error']['description'])
    return result
+
+
def instance_vnf_list(args):
    """Placeholder command: not implemented; prints its name and returns 0."""
    print("instance-vnf-list")
    return 0
+
def instance_vnf_status(args):
    """Placeholder command: not implemented; prints its name and returns 0."""
    print("instance-vnf-status")
    return 0
+
def tenant_create(args):
    """Create an openmano tenant from --name and optional --description."""
    body = {"name": args.name}
    if args.description is not None:
        body["description"] = args.description
    payload = json.dumps({"tenant": body})

    url = "http://{}:{}/openmano/tenants".format(mano_host, mano_port)
    logger.debug("openmano request: %s", payload)
    response = requests.post(
        url,
        headers={'Accept': 'application/json', 'content-type': 'application/json'},
        data=payload)
    logger.debug("openmano response: %s", response.text)
    return _print_verbose(response, args.verbose)
+
def tenant_list(args):
    """List tenants, or show one (by name/uuid) with one extra verbosity level."""
    if args.name:
        uuid = _get_item_uuid("tenants", args.name)
        url = "http://{}:{}/openmano/tenants/{}".format(mano_host, mano_port, uuid)
    else:
        url = "http://{}:{}/openmano/tenants".format(mano_host, mano_port)
    response = requests.get(url)
    logger.debug("openmano response: %s", response.text)
    if args.verbose is None:
        args.verbose = 0
    if args.name is not None:
        args.verbose += 1
    return _print_verbose(response, args.verbose)
+
def tenant_delete(args):
    """Delete a tenant by name/uuid, asking for confirmation unless --force.

    Returns 0 on success (or when the user declines), or the HTTP status code
    on error.
    """
    todelete = _get_item_uuid("tenants", args.name)
    if not args.force:
        answer = input("Delete tenant {} (y/N)? ".format(args.name))
        if not answer or answer[0].lower() != "y":
            return 0
    url = "http://{}:{}/openmano/tenants/{}".format(mano_host, mano_port, todelete)
    response = requests.delete(url)
    logger.debug("openmano response: %s", response.text)
    content = response.json()
    if response.status_code == 200:
        print(content['result'])
        return 0
    print(content['error']['description'])
    return response.status_code
+
def datacenter_attach(args):
    """Attach a datacenter to the current tenant, registering a VIM account.

    Sends the VIM credentials (tenant id/name, user, password) and optional
    config to the server. Returns the _print_verbose result (0 on success,
    HTTP status code on error).
    """
    tenant = _get_tenant()
    datacenter = _get_datacenter(args.name)
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}

    datacenter_dict={}
    if args.vim_tenant_id != None:
        datacenter_dict['vim_tenant'] = args.vim_tenant_id
    if args.vim_tenant_name != None:
        datacenter_dict['vim_tenant_name'] = args.vim_tenant_name
    if args.user != None:
        datacenter_dict['vim_username'] = args.user
    if args.password != None:
        datacenter_dict['vim_password'] = args.password
    if args.config!=None:
        datacenter_dict["config"] = _load_file_or_yaml(args.config)

    payload_req = json.dumps( {"datacenter": datacenter_dict })

    # print(payload_req)

    URLrequest = "http://{}:{}/openmano/{}/datacenters/{}".format(mano_host, mano_port, tenant, datacenter)
    logger.debug("openmano request: %s", payload_req)
    mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
    logger.debug("openmano response: %s", mano_response.text )
    result = _print_verbose(mano_response, args.verbose)
    # on error, give extra guidance when the server reports a duplicate name
    # (NOTE: matches exact server wording, including the double space)
    if mano_response.status_code != 200:
        content = mano_response.json()
        if "already in use for  'name'" in content['error']['description'] and \
                "to database vim_tenants table" in content['error']['description']:
            print("Try to specify a different name with --vim-tenant-name")
    return result
+
+
def datacenter_edit_vim_tenant(args):
    """Update the VIM account (credentials/config) of an attached datacenter.

    Raises OpenmanoCLIError when no field to update was supplied. Returns the
    _print_verbose result (0 on success, HTTP status code on error).
    """
    tenant = _get_tenant()
    datacenter = _get_datacenter(args.name)

    if not (args.vim_tenant_id or args.vim_tenant_name or args.user or args.password or args.config):
        raise OpenmanoCLIError("Error. At least one parameter must be updated.")

    fields = {}
    if args.vim_tenant_id is not None:
        fields['vim_tenant'] = args.vim_tenant_id
    if args.vim_tenant_name is not None:
        fields['vim_tenant_name'] = args.vim_tenant_name
    if args.user is not None:
        fields['vim_username'] = args.user
    if args.password is not None:
        fields['vim_password'] = args.password
    if args.config is not None:
        fields["config"] = _load_file_or_yaml(args.config)
    payload = json.dumps({"datacenter": fields})

    url = "http://{}:{}/openmano/{}/datacenters/{}".format(mano_host, mano_port, tenant, datacenter)
    logger.debug("openmano request: %s", payload)
    response = requests.put(
        url,
        headers={'Accept': 'application/json', 'content-type': 'application/json'},
        data=payload)
    logger.debug("openmano response: %s", response.text)
    return _print_verbose(response, args.verbose)
+
def datacenter_detach(args):
    """Detach a datacenter from the current tenant (any tenant with --all).

    Returns 0 on success or the HTTP status code on error.
    """
    tenant = "any" if args.all else _get_tenant()
    datacenter = _get_datacenter(args.name, tenant)
    url = "http://{}:{}/openmano/{}/datacenters/{}".format(mano_host, mano_port, tenant, datacenter)
    response = requests.delete(
        url, headers={'Accept': 'application/json', 'content-type': 'application/json'})
    logger.debug("openmano response: %s", response.text)
    content = response.json()
    if response.status_code == 200:
        print(content['result'])
        return 0
    print(content['error']['description'])
    return response.status_code
+
def datacenter_create(args):
    """Register a new datacenter (VIM) in openmano.

    Builds the datacenter description from the CLI arguments (--name, --url,
    --description, --type, --url-admin, --config, --sdn-controller) and POSTs
    it. Returns the _print_verbose result (0 on success, HTTP status code on
    error).
    """
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
    datacenter_dict = {"name": args.name, "vim_url": args.url}
    if args.description != None:
        datacenter_dict["description"] = args.description
    if args.type != None:
        datacenter_dict["type"] = args.type
    # BUG FIX: previously tested args.url instead of args.url_admin, so
    # vim_url_admin was set to None whenever --url was given without --url-admin
    if args.url_admin != None:
        datacenter_dict["vim_url_admin"] = args.url_admin
    if args.config != None:
        datacenter_dict["config"] = _load_file_or_yaml(args.config)
    if args.sdn_controller != None:
        tenant = _get_tenant()
        sdn_controller = _get_item_uuid("sdn_controllers", args.sdn_controller, tenant)
        if 'config' not in datacenter_dict:
            datacenter_dict['config'] = {}
        datacenter_dict['config']['sdn-controller'] = sdn_controller
    payload_req = json.dumps({"datacenter": datacenter_dict})

    URLrequest = "http://{}:{}/openmano/datacenters".format(mano_host, mano_port)
    logger.debug("openmano request: %s", payload_req)
    mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
    logger.debug("openmano response: %s", mano_response.text)
    return _print_verbose(mano_response, args.verbose)
+
def datacenter_delete(args):
    """Delete a datacenter by name/uuid (looked up in any tenant), confirming unless --force.

    Returns 0 on success (or when the user declines), or the HTTP status code
    on error.
    """
    todelete = _get_item_uuid("datacenters", args.name, "any")
    if not args.force:
        answer = input("Delete datacenter {} (y/N)? ".format(args.name))
        if not answer or answer[0].lower() != "y":
            return 0
    url = "http://{}:{}/openmano/datacenters/{}".format(mano_host, mano_port, todelete)
    response = requests.delete(url)
    logger.debug("openmano response: %s", response.text)
    content = response.json()
    if response.status_code == 200:
        print(content['result'])
        return 0
    print(content['error']['description'])
    return response.status_code
+
+
def datacenter_list(args):
    """List the tenant's datacenters (all tenants with --all); show one if --name."""
    tenant = 'any' if args.all else _get_tenant()

    if args.name:
        uuid = _get_item_uuid("datacenters", args.name, tenant)
        url = "http://{}:{}/openmano/{}/datacenters/{}".format(mano_host, mano_port, tenant, uuid)
    else:
        url = "http://{}:{}/openmano/{}/datacenters".format(mano_host, mano_port, tenant)
    response = requests.get(url)
    logger.debug("openmano response: %s", response.text)
    if args.verbose is None:
        args.verbose = 0
    if args.name is not None:
        args.verbose += 1
    return _print_verbose(response, args.verbose)
+
+
def datacenter_sdn_port_mapping_set(args):
    """Set (replace) the SDN port mapping of a datacenter.

    Loads the mapping from the yaml/json file given with --file. If the
    datacenter already has a mapping, asks for confirmation (unless --force)
    and clears it before posting the new one. Raises OpenmanoCLIError when no
    file is given or the current mapping cannot be read.
    """
    tenant = _get_tenant()
    datacenter = _get_datacenter(args.name, tenant)
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}

    if not args.file:
        raise OpenmanoCLIError(
            "No yaml/json has been provided specifying the SDN port mapping")
    sdn_port_mapping = _load_file_or_yaml(args.file)
    payload_req = json.dumps({"sdn_port_mapping": sdn_port_mapping})

    # the same endpoint serves read (GET), clear (DELETE) and set (POST);
    # build the URL once instead of three times
    URLrequest = "http://{}:{}/openmano/{}/datacenters/{}/sdn_mapping".format(mano_host, mano_port, tenant, datacenter)

    # read the current mapping
    mano_response = requests.get(URLrequest)
    logger.debug("openmano response: %s", mano_response.text)
    port_mapping = mano_response.json()
    if mano_response.status_code != 200:
        # (removed a no-op "str(mano_response.json())" statement here)
        raise OpenmanoCLIError("openmano client error: {}".format(port_mapping['error']['description']))
    if len(port_mapping["sdn_port_mapping"]["ports_mapping"]) > 0:
        if not args.force:
            r = input("Datacenter {} already contains a port mapping. Overwrite? (y/N)? ".format(datacenter))
            if not (len(r) > 0 and r[0].lower() == "y"):
                return 0

        # clear the existing mapping before setting the new one
        mano_response = requests.delete(URLrequest)
        logger.debug("openmano response: %s", mano_response.text)
        if mano_response.status_code != 200:
            return _print_verbose(mano_response, args.verbose)

    # set the new mapping
    logger.debug("openmano request: %s", payload_req)
    mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
    logger.debug("openmano response: %s", mano_response.text)
    return _print_verbose(mano_response, args.verbose)
+
+
def datacenter_sdn_port_mapping_list(args):
    """Print the full SDN port mapping of a datacenter (always max verbosity)."""
    tenant = _get_tenant()
    datacenter = _get_datacenter(args.name, tenant)
    url = "http://{}:{}/openmano/{}/datacenters/{}/sdn_mapping".format(mano_host, mano_port, tenant, datacenter)
    response = requests.get(url)
    logger.debug("openmano response: %s", response.text)
    return _print_verbose(response, 4)
+
+
def datacenter_sdn_port_mapping_clear(args):
    """Remove the SDN port mapping of a datacenter, confirming unless --force."""
    tenant = _get_tenant()
    datacenter = _get_datacenter(args.name, tenant)

    if not args.force:
        answer = input("Clean SDN port mapping for datacenter {} (y/N)? ".format(datacenter))
        if not answer or answer[0].lower() != "y":
            return 0

    url = "http://{}:{}/openmano/{}/datacenters/{}/sdn_mapping".format(mano_host, mano_port, tenant, datacenter)
    response = requests.delete(url)
    logger.debug("openmano response: %s", response.text)
    return _print_verbose(response, args.verbose)
+
+
def sdn_controller_create(args):
    """Register a new SDN controller in openmano.

    ip, port, dpid and type are mandatory; description, user and password are
    only sent when explicitly provided. Returns the _print_verbose exit code.
    """
    tenant = _get_tenant()
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}

    # report every missing mandatory argument at once instead of one at a time
    missing = [label for label, value in (("'ip'", args.ip), ("'port'", args.port),
                                          ("'dpid'", args.dpid), ("'type'", args.type)) if not value]
    if missing:
        raise OpenmanoCLIError("The following arguments are required: " + ",".join(missing))

    controller_dict = {
        'name': args.name,
        'ip': args.ip,
        'port': int(args.port),
        'dpid': args.dpid,
        'type': args.type,
    }
    # optional attributes: only include keys the user actually supplied
    if args.description is not None:
        controller_dict['description'] = args.description
    if args.user is not None:
        controller_dict['user'] = args.user
    if args.password is not None:
        controller_dict['password'] = args.password

    payload_req = json.dumps({"sdn_controller": controller_dict})

    URLrequest = "http://{}:{}/openmano/{}/sdn_controllers".format(mano_host, mano_port, tenant)
    logger.debug("openmano request: %s", payload_req)
    mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
    logger.debug("openmano response: %s", mano_response.text)
    return _print_verbose(mano_response, args.verbose)
+
+
def sdn_controller_edit(args):
    """Modify an existing SDN controller.

    Only the fields given on the command line are sent; at least one must be
    supplied. Confirmation is requested unless --force is used.
    """
    tenant = _get_tenant()
    controller_uuid = _get_item_uuid("sdn_controllers", args.name, tenant)
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}

    # collect only the fields the user provided; 'port' is converted to int
    controller_dict = {}
    for field, raw in (('name', args.new_name), ('ip', args.ip), ('port', args.port),
                       ('dpid', args.dpid), ('type', args.type), ('description', args.description),
                       ('user', args.user), ('password', args.password)):
        if raw:
            controller_dict[field] = int(raw) if field == 'port' else raw

    if not controller_dict:
        raise OpenmanoCLIError("At least one parameter must be edited")

    if not args.force:
        reply = input("Update SDN controller {} (y/N)? ".format(args.name))
        if not reply or reply[0].lower() != "y":
            return 0

    payload_req = json.dumps({"sdn_controller": controller_dict})

    url = "http://{}:{}/openmano/{}/sdn_controllers/{}".format(mano_host, mano_port, tenant, controller_uuid)
    logger.debug("openmano request: %s", payload_req)
    response = requests.put(url, headers=headers_req, data=payload_req)
    logger.debug("openmano response: %s", response.text)
    return _print_verbose(response, args.verbose)
+
+
def sdn_controller_list(args):
    """List the SDN controllers of the current tenant, or show one by name/uuid.

    When a name is given, verbosity is bumped by one so the single controller
    is printed with full detail. Returns the _print_verbose exit code.
    """
    tenant = _get_tenant()

    if args.name:
        toshow = _get_item_uuid("sdn_controllers", args.name, tenant)
        URLrequest = "http://{}:{}/openmano/{}/sdn_controllers/{}".format(mano_host, mano_port, tenant, toshow)
    else:
        URLrequest = "http://{}:{}/openmano/{}/sdn_controllers".format(mano_host, mano_port, tenant)
    mano_response = requests.get(URLrequest)
    logger.debug("openmano response: %s", mano_response.text)
    # normalize verbosity: None means 0; a single named item gets one extra level
    if args.verbose is None:
        args.verbose = 0
    if args.name is not None:
        args.verbose += 1

    return _print_verbose(mano_response, args.verbose)
+
+
def sdn_controller_delete(args):
    """Delete an SDN controller after confirmation (unless --force)."""
    tenant = _get_tenant()
    controller_uuid = _get_item_uuid("sdn_controllers", args.name, tenant)

    if not args.force:
        answer = input("Delete SDN controller {} (y/N)? ".format(args.name))
        if not answer or answer[0].lower() != "y":
            return 0

    url = "http://{}:{}/openmano/{}/sdn_controllers/{}".format(mano_host, mano_port, tenant, controller_uuid)
    response = requests.delete(url)
    logger.debug("openmano response: %s", response.text)
    return _print_verbose(response, args.verbose)
+
def vim_action(args):
    """Generic handler for direct VIM operations (vim-<item>-<action> commands).

    Supports action 'list' (GET, optionally a single named item with extra
    verbosity), 'delete' (DELETE, prints result or error description) and
    'create' (POST of a yaml descriptor built from --file/--name/... options).
    """
    tenant = _get_tenant()
    datacenter = _get_datacenter(args.datacenter, tenant)
    if args.verbose is None:
        args.verbose = 0
    if args.action == "list":
        URLrequest = "http://{}:{}/openmano/{}/vim/{}/{}s".format(mano_host, mano_port, tenant, datacenter, args.item)
        if args.name is not None:
            # single item requested: show it with one extra verbosity level
            args.verbose += 1
            URLrequest += "/" + args.name
        mano_response = requests.get(URLrequest)
        logger.debug("openmano response: %s", mano_response.text)
        return _print_verbose(mano_response, args.verbose)
    elif args.action == "delete":
        URLrequest = "http://{}:{}/openmano/{}/vim/{}/{}s/{}".format(mano_host, mano_port, tenant, datacenter,
                                                                     args.item, args.name)
        mano_response = requests.delete(URLrequest)
        logger.debug("openmano response: %s", mano_response.text)
        result = 0 if mano_response.status_code == 200 else mano_response.status_code
        content = mano_response.json()
        if mano_response.status_code == 200:
            print(content['result'])
        else:
            print(content['error']['description'])
        return result
    elif args.action == "create":
        headers_req = {'content-type': 'application/yaml'}
        if args.file:
            create_dict = _load_file_or_yaml(args.file)
            # wrap a bare descriptor under the item key if not already wrapped
            if args.item not in create_dict:
                create_dict = {args.item: create_dict}
        else:
            create_dict = {args.item: {}}
        if args.name:
            create_dict[args.item]['name'] = args.name
        if args.item == "network":
            # network-only optional attributes
            if args.bind_net:
                create_dict[args.item]['bind_net'] = args.bind_net
            if args.type:
                create_dict[args.item]['type'] = args.type
            if args.shared:
                create_dict[args.item]['shared'] = args.shared
        if "name" not in create_dict[args.item]:
            print("You must provide a name in the descriptor file or with the --name option")
            # NOTE(review): returns None here (not an error code) — confirm intended
            return
        payload_req = yaml.safe_dump(create_dict, explicit_start=True, indent=4, default_flow_style=False, tags=False,
                                     allow_unicode=True)
        logger.debug("openmano request: %s", payload_req)
        URLrequest = "http://{}:{}/openmano/{}/vim/{}/{}s".format(mano_host, mano_port, tenant, datacenter, args.item)
        mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
        logger.debug("openmano response: %s", mano_response.text)
        # args.verbose was already normalized at the top of the function
        return _print_verbose(mano_response, args.verbose)
+
+
def _get_items(item, item_name_id=None, datacenter=None, tenant=None):
    """Perform a GET over the openmano catalogue and return the raw response.

    The URL is built as /openmano[/tenant][/vim/datacenter][/items][/name-or-id],
    each optional path segment appearing only when its argument is truthy.
    """
    segments = ["http://{}:{}/openmano".format(mano_host, mano_port)]
    if tenant:
        segments.append(tenant)
    if datacenter:
        segments.append("vim/" + datacenter)
    if item:
        segments.append(item + "s")
    if item_name_id:
        segments.append(item_name_id)

    response = requests.get("/".join(segments))
    logger.debug("openmano response: %s", response.text)
    return response
+
+
def vim_net_sdn_attach(args):
    """Attach a dataplane port to the SDN network associated with a vim network.

    Resolves the vim network to its uuid first and refuses ambiguous names.
    """
    # Verify the network exists in the vim and resolve its uuid
    tenant = _get_tenant()
    datacenter = _get_datacenter(args.datacenter, tenant)
    result = _get_items('network', item_name_id=args.vim_net, datacenter=datacenter, tenant=tenant)
    # safe_load: the response is plain data; yaml.load without a Loader is unsafe and deprecated
    content = yaml.safe_load(result.content)
    if 'networks' in content:
        raise OpenmanoCLIError('More than one network in the vim named ' + args.vim_net + '. Use uuid instead')
    if 'error' in content:
        raise OpenmanoCLIError(yaml.safe_dump(content))
    network_uuid = content['network']['id']

    # Make call to attach the dataplane port to the SDN network associated to the vim network
    headers_req = {'content-type': 'application/yaml'}
    payload_req = {'port': args.port}
    if args.vlan:
        payload_req['vlan'] = int(args.vlan)
    if args.mac:
        payload_req['mac'] = args.mac

    URLrequest = "http://{}:{}/openmano/{}/vim/{}/network/{}/attach".format(mano_host, mano_port, tenant, datacenter,
                                                                            network_uuid)
    logger.debug("openmano request: %s", payload_req)
    mano_response = requests.post(URLrequest, headers=headers_req, data=json.dumps(payload_req))
    logger.debug("openmano response: %s", mano_response.text)
    return _print_verbose(mano_response, args.verbose)
+
+
def vim_net_sdn_detach(args):
    """Detach one (--id) or all (--all) dataplane ports from the SDN network
    associated with a vim network; asks for confirmation unless --force."""
    if not args.all and not args.id:
        print("--all or --id must be used")
        return 1

    # Verify the network exists in the vim and resolve its uuid
    tenant = _get_tenant()
    datacenter = _get_datacenter(args.datacenter, tenant)
    result = _get_items('network', item_name_id=args.vim_net, datacenter=datacenter, tenant=tenant)
    # safe_load: the response is plain data; yaml.load without a Loader is unsafe and deprecated
    content = yaml.safe_load(result.content)
    if 'networks' in content:
        raise OpenmanoCLIError('More than one network in the vim named ' + args.vim_net + '. Use uuid instead')
    if 'error' in content:
        raise OpenmanoCLIError(yaml.safe_dump(content))
    network_uuid = content['network']['id']

    if not args.force:
        r = input("Confirm action' (y/N)? ")
        if len(r) == 0 or r[0].lower() != "y":
            return 0

    if args.id:
        URLrequest = "http://{}:{}/openmano/{}/vim/{}/network/{}/detach/{}".format(
            mano_host, mano_port, tenant, datacenter, network_uuid, args.id)
    else:
        URLrequest = "http://{}:{}/openmano/{}/vim/{}/network/{}/detach".format(
            mano_host, mano_port, tenant, datacenter, network_uuid)
    mano_response = requests.delete(URLrequest)
    logger.debug("openmano response: %s", mano_response.text)
    return _print_verbose(mano_response, args.verbose)
+
+
def datacenter_net_action(args):
    """Deprecated handler for 'datacenter-net-*' commands.

    Rewrites the parsed arguments into the equivalent 'datacenter-netmap-*'
    invocation and delegates to datacenter_netmap_action.
    """
    if args.action == "net-update":
        print("This command is deprecated, use 'openmano datacenter-netmap-delete --all' and 'openmano"
              " datacenter-netmap-import' instead!!!")
        print()
        # emulate the old behavior: wipe every netmap, then re-import from the vim
        args.action = "netmap-delete"
        args.netmap = None
        args.all = True
        result = datacenter_netmap_action(args)
        if result == 0:
            args.force = True
            args.action = "netmap-import"
            result = datacenter_netmap_action(args)
        return result

    if args.action == "net-list":
        args.netmap = None
    elif args.action in ("net-edit", "net-delete"):
        args.netmap = args.net
        if args.action == "net-edit":
            args.name = None
        else:
            args.all = False

    # "net-xxx" -> "netmap-xxx"
    args.action = "netmap" + args.action[3:]
    args.vim_name = None
    args.vim_id = None
    print("This command is deprecated, use 'openmano datacenter-{}' instead!!!".format(args.action))
    print()
    return datacenter_netmap_action(args)
+
def datacenter_netmap_action(args):
    """Handle the 'datacenter-netmap-*' commands: list, delete, import, create and
    edit the default netmaps of a datacenter.

    Returns _print_verbose's exit code, 1 on invalid argument combinations, or 0
    when the user aborts a confirmation prompt.
    """
    tenant = _get_tenant()
    datacenter = _get_datacenter(args.datacenter, tenant)
    payload_req = None
    if args.verbose is None:
        args.verbose = 0
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
    URLrequest = "http://{}:{}/openmano/{}/datacenters/{}/netmaps".format(mano_host, mano_port, tenant, datacenter)

    if args.action == "netmap-list":
        if args.netmap:
            URLrequest += "/" + args.netmap
            args.verbose += 1
        mano_response = requests.get(URLrequest)

    elif args.action == "netmap-delete":
        if args.netmap and args.all:
            print("you can not use a netmap name and the option --all at the same time")
            return 1
        if args.netmap:
            force_text = "Delete default netmap '{}' from datacenter '{}' (y/N)? ".format(args.netmap, datacenter)
            URLrequest += "/" + args.netmap
        elif args.all:
            force_text = "Delete all default netmaps from datacenter '{}' (y/N)? ".format(datacenter)
        else:
            print("you must specify a netmap name or the option --all")
            return 1
        if not args.force:
            r = input(force_text)
            if not (len(r) > 0 and r[0].lower() == "y"):
                return 0
        mano_response = requests.delete(URLrequest, headers=headers_req)
    elif args.action == "netmap-import":
        if not args.force:
            r = input("Create all the available networks from datacenter '{}' as default netmaps (y/N)? ".format(datacenter))
            if not (len(r) > 0 and r[0].lower() == "y"):
                return 0
        URLrequest += "/upload"
        mano_response = requests.post(URLrequest, headers=headers_req)
    elif args.action == "netmap-edit" or args.action == "netmap-create":
        payload = _load_file_or_yaml(args.file) if args.file else {}
        if "netmap" not in payload:
            payload = {"netmap": payload}
        if args.name:
            payload["netmap"]["name"] = args.name
        if args.vim_id:
            payload["netmap"]["vim_id"] = args.vim_id
        if args.action == "netmap-create" and args.vim_name:
            payload["netmap"]["vim_name"] = args.vim_name
        payload_req = json.dumps(payload)
        logger.debug("openmano request: %s", payload_req)

        if args.action == "netmap-edit":
            # BUGFIX: previously this branch was only taken when --force was NOT
            # given, so 'netmap-edit --force' fell through to the create branch and
            # POSTed a brand-new netmap instead of PUT-editing the existing one.
            if len(payload["netmap"]) == 0:
                print("You must supply some parameter to edit")
                return 1
            if not args.force:
                r = input("Edit default netmap '{}' from datacenter '{}' (y/N)? ".format(args.netmap, datacenter))
                if not (len(r) > 0 and r[0].lower() == "y"):
                    return 0
            URLrequest += "/" + args.netmap
            mano_response = requests.put(URLrequest, headers=headers_req, data=payload_req)
        else:  # netmap-create
            if "vim_name" not in payload["netmap"] and "vim_id" not in payload["netmap"]:
                print("You must supply either --vim-id or --vim-name option; or include one of them in the file"
                      " descriptor")
                return 1
            mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)

    logger.debug("openmano response: %s", mano_response.text)
    return _print_verbose(mano_response, args.verbose)
+
+
def element_edit(args):
    """Edit a generic catalogue element (the plural item name comes in args.element)
    from a yaml/json descriptor file, asking for confirmation unless forced."""
    element = _get_item_uuid(args.element, args.name)
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
    URLrequest = "http://{}:{}/openmano/{}/{}".format(mano_host, mano_port, args.element, element)
    payload = _load_file_or_yaml(args.file)
    # wrap the descriptor under the singular item name if not already wrapped
    item_key = args.element[:-1]
    if item_key not in payload:
        payload = {item_key: payload}
    payload_req = json.dumps(payload)

    # NOTE(review): 'args.filer' looks like a typo for 'args.file' — confirm; kept as-is
    if not args.force or (args.name is None and args.filer is None):
        reply = input(" Edit " + item_key + " " + args.name + " (y/N)? ")
        if not reply or reply[0].lower() != "y":
            return 0
    logger.debug("openmano request: %s", payload_req)
    mano_response = requests.put(URLrequest, headers=headers_req, data=payload_req)
    logger.debug("openmano response: %s", mano_response.text)
    if args.verbose is None:
        args.verbose = 0
    if args.name is not None:
        args.verbose += 1
    return _print_verbose(mano_response, args.verbose)
+
+
def datacenter_edit(args):
    """Edit a datacenter from a descriptor file and/or attach/detach its SDN controller.

    Raises OpenmanoCLIError when neither --file nor --sdn-controller is supplied.
    """
    tenant = _get_tenant()
    element = _get_item_uuid('datacenters', args.name, tenant)
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
    URLrequest = "http://{}:{}/openmano/datacenters/{}".format(mano_host, mano_port, element)

    has_arguments = False
    if args.file is not None:
        has_arguments = True
        payload = _load_file_or_yaml(args.file)
    else:
        payload = {}

    if args.sdn_controller is not None:
        has_arguments = True
        payload.setdefault('config', {})
        # the literal string 'null' detaches the SDN controller;
        # any other value is resolved to the controller's uuid
        if args.sdn_controller == 'null':
            payload['config']['sdn-controller'] = None
        else:
            payload['config']['sdn-controller'] = _get_item_uuid("sdn_controllers", args.sdn_controller, tenant)

    if not has_arguments:
        raise OpenmanoCLIError("At least one argument must be provided to modify the datacenter")

    if 'datacenter' not in payload:
        payload = {'datacenter': payload}
    payload_req = json.dumps(payload)

    # NOTE(review): 'args.filer' is likely a typo for 'args.file' — confirm; kept for compatibility
    if not args.force or (args.name is None and args.filer is None):
        r = input(" Edit datacenter " + args.name + " (y/N)? ")
        if not (len(r) > 0 and r[0].lower() == "y"):
            return 0
    logger.debug("openmano request: %s", payload_req)
    mano_response = requests.put(URLrequest, headers=headers_req, data=payload_req)
    logger.debug("openmano response: %s", mano_response.text)
    if args.verbose is None:
        args.verbose = 0
    if args.name is not None:
        args.verbose += 1
    return _print_verbose(mano_response, args.verbose)
+
+
+# WIM
def wim_account_create(args):
    """Attach the current tenant to a WIM by creating a wim account for it."""
    tenant = _get_tenant()
    wim = _get_wim(args.name)
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}

    # build the account only from the fields explicitly supplied
    account = {}
    for key, value in (('name', args.account_name), ('user', args.user), ('password', args.password)):
        if value is not None:
            account[key] = value
    if args.config is not None:
        account["config"] = _load_file_or_yaml(args.config)

    payload_req = json.dumps({"wim_account": account})

    url = "http://{}:{}/openmano/{}/wims/{}".format(mano_host, mano_port, tenant, wim)
    logger.debug("openmano request: %s", payload_req)
    response = requests.post(url, headers=headers_req, data=payload_req)
    logger.debug("openmano response: %s", response.text)
    result = _print_verbose(response, args.verbose)
    # give a hint when the failure is a duplicated account name
    if response.status_code != 200:
        description = response.json()['error']['description']
        if "already in use for  'name'" in description and "to database wim_tenants table" in description:
            print("Try to specify a different name with --wim-tenant-name")
    return result
+
+
def wim_account_delete(args):
    """Detach the WIM account of the current tenant (or of any tenant with --all)."""
    tenant = "any" if args.all else _get_tenant()
    wim = _get_wim(args.name, tenant)
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
    url = "http://{}:{}/openmano/{}/wims/{}".format(mano_host, mano_port, tenant, wim)
    response = requests.delete(url, headers=headers_req)
    logger.debug("openmano response: %s", response.text)
    content = response.json()
    if response.status_code == 200:
        print(content['result'])
        return 0
    print(content['error']['description'])
    return response.status_code
+
+
def wim_account_edit(args):
    """Modify the WIM account of the current tenant.

    BUGFIX: the original guards were inverted ('if not args.X:'), so a field was
    only (mis)set when the user did NOT supply it — the name was filled from the
    unrelated args.vim_tenant_name and a missing --config crashed in
    _load_file_or_yaml(None). Fields are now sent only when explicitly provided.
    """
    tenant = _get_tenant()
    wim = _get_wim(args.name)
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}

    wim_dict = {}
    if args.account_name:
        wim_dict['name'] = args.account_name
    if args.user:
        wim_dict['user'] = args.user
    if args.password:
        wim_dict['password'] = args.password
    if args.config:
        wim_dict["config"] = _load_file_or_yaml(args.config)

    payload_req = json.dumps({"wim_account": wim_dict})

    URLrequest = "http://{}:{}/openmano/{}/wims/{}".format(mano_host, mano_port, tenant, wim)
    logger.debug("openmano request: %s", payload_req)
    mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
    logger.debug("openmano response: %s", mano_response.text)
    result = _print_verbose(mano_response, args.verbose)
    # provide additional information if the failure is a duplicated account name
    if mano_response.status_code != 200:
        content = mano_response.json()
        if "already in use for  'name'" in content['error']['description'] and \
                "to database wim_tenants table" in content['error']['description']:
            print("Try to specify a different name with --wim-tenant-name")
    return result
+
def wim_create(args):
    """Register a new WIM (WAN Infrastructure Manager) in openmano.

    name and url are mandatory (enforced by argparse); description, type and
    config are only sent when explicitly provided.
    """
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
    wim_dict = {"name": args.name, "wim_url": args.url}
    if args.description is not None:
        wim_dict["description"] = args.description
    if args.type is not None:
        wim_dict["type"] = args.type
    if args.config is not None:
        wim_dict["config"] = _load_file_or_yaml(args.config)

    payload_req = json.dumps({"wim": wim_dict})

    URLrequest = "http://{}:{}/openmano/wims".format(mano_host, mano_port)
    logger.debug("openmano request: %s", payload_req)
    mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
    logger.debug("openmano response: %s", mano_response.text)
    return _print_verbose(mano_response, args.verbose)
+
+
def wim_edit(args):
    """Edit a WIM from a yaml/json descriptor file.

    Raises OpenmanoCLIError when no --file is given (the only editable source).
    """
    tenant = _get_tenant()
    element = _get_item_uuid('wims', args.name, tenant)
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
    URLrequest = "http://{}:{}/openmano/wims/{}".format(mano_host, mano_port, element)

    if args.file is None:
        raise OpenmanoCLIError("At least one argument must be provided to modify the wim")
    payload = _load_file_or_yaml(args.file)

    if 'wim' not in payload:
        payload = {'wim': payload}
    payload_req = json.dumps(payload)

    # NOTE(review): 'args.filer' is likely a typo for 'args.file' — confirm; kept for compatibility
    if not args.force or (args.name is None and args.filer is None):
        r = input(" Edit wim " + args.name + " (y/N)? ")
        if not (len(r) > 0 and r[0].lower() == "y"):
            return 0
    logger.debug("openmano request: %s", payload_req)
    mano_response = requests.put(URLrequest, headers=headers_req, data=payload_req)
    logger.debug("openmano response: %s", mano_response.text)
    if args.verbose is None:
        args.verbose = 0
    if args.name is not None:
        args.verbose += 1
    return _print_verbose(mano_response, args.verbose)
+
+
def wim_delete(args):
    """Delete a WIM (searched across any tenant) after confirmation (unless --force)."""
    todelete = _get_item_uuid("wims", args.name, "any")
    if not args.force:
        answer = input("Delete wim {} (y/N)? ".format(args.name))
        if not answer or answer[0].lower() != "y":
            return 0
    url = "http://{}:{}/openmano/wims/{}".format(mano_host, mano_port, todelete)
    response = requests.delete(url)
    logger.debug("openmano response: %s", response.text)
    content = response.json()
    if response.status_code == 200:
        print(content['result'])
        return 0
    print(content['error']['description'])
    return response.status_code
+
+
def wim_list(args):
    """List WIMs of the current tenant (or of any tenant with --all).

    When a name is given, verbosity is bumped by one so the single WIM is
    printed with full detail.
    """
    tenant = 'any' if args.all else _get_tenant()

    if args.name:
        toshow = _get_item_uuid("wims", args.name, tenant)
        URLrequest = "http://{}:{}/openmano/{}/wims/{}".format(mano_host, mano_port, tenant, toshow)
    else:
        URLrequest = "http://{}:{}/openmano/{}/wims".format(mano_host, mano_port, tenant)
    mano_response = requests.get(URLrequest)
    logger.debug("openmano response: %s", mano_response.text)
    if args.verbose is None:
        args.verbose = 0
    if args.name is not None:
        args.verbose += 1
    return _print_verbose(mano_response, args.verbose)
+
+
def wim_port_mapping_set(args):
    """Set the WIM port mapping from a yaml/json file.

    If a mapping already exists it is cleared first (after confirmation, unless
    --force). Fix: removed a dead 'str(mano_response.json())' statement whose
    result was discarded.
    """
    tenant = _get_tenant()
    wim = _get_wim(args.name, tenant)
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}

    if not args.file:
        raise OpenmanoCLIError(
            "No yaml/json has been provided specifying the WIM port mapping")
    wim_port_mapping = _load_file_or_yaml(args.file)

    payload_req = json.dumps({"wim_port_mapping": wim_port_mapping})

    # read the current mapping to know whether it must be cleared first
    URLrequest = "http://{}:{}/openmano/{}/wims/{}/port_mapping".format(mano_host, mano_port, tenant, wim)
    mano_response = requests.get(URLrequest)
    logger.debug("openmano response: %s", mano_response.text)
    port_mapping = mano_response.json()

    if mano_response.status_code != 200:
        raise OpenmanoCLIError("openmano client error: {}".format(port_mapping['error']['description']))
    if len(port_mapping["wim_port_mapping"]) > 0:
        if not args.force:
            r = input("WIM {} already contains a port mapping. Overwrite? (y/N)? ".format(wim))
            if not (len(r) > 0 and r[0].lower() == "y"):
                return 0

        # clear the existing mapping
        URLrequest = "http://{}:{}/openmano/{}/wims/{}/port_mapping".format(mano_host, mano_port, tenant, wim)
        mano_response = requests.delete(URLrequest)
        logger.debug("openmano response: %s", mano_response.text)
        if mano_response.status_code != 200:
            return _print_verbose(mano_response, args.verbose)

    # set the new mapping
    URLrequest = "http://{}:{}/openmano/{}/wims/{}/port_mapping".format(mano_host, mano_port, tenant, wim)
    logger.debug("openmano request: %s", payload_req)
    mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
    logger.debug("openmano response: %s", mano_response.text)
    return _print_verbose(mano_response, 4)
+
+
def wim_port_mapping_list(args):
    """Retrieve and print (max verbosity) the current WIM port mapping."""
    tenant = _get_tenant()
    wim = _get_wim(args.name, tenant)

    url = "http://{}:{}/openmano/{}/wims/{}/port_mapping".format(mano_host, mano_port, tenant, wim)
    response = requests.get(url)
    logger.debug("openmano response: %s", response.text)

    return _print_verbose(response, 4)
+
+
def wim_port_mapping_clear(args):
    """Delete the WIM port mapping after confirmation (unless --force)."""
    tenant = _get_tenant()
    wim = _get_wim(args.name, tenant)

    if not args.force:
        answer = input("Clear WIM port mapping for wim {} (y/N)? ".format(wim))
        if not answer or answer[0].lower() != "y":
            return 0

    url = "http://{}:{}/openmano/{}/wims/{}/port_mapping".format(mano_host, mano_port, tenant, wim)
    response = requests.delete(url)
    logger.debug("openmano response: %s", response.text)
    content = response.json()
    if response.status_code == 200:
        print(content['result'])
        return 0
    print(content['error']['description'])
    return response.status_code
+
+
def version(args):
    """Query the openmano server for its version and print the raw response."""
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
    url = "http://{}:{}/openmano/version".format(mano_host, mano_port)

    response = requests.get(url, headers=headers_req)
    logger.debug("openmano response: %s", response.text)
    print(response.text)
+
+
+def main():
+    global mano_host
+    global mano_port
+    global mano_tenant
+    global logger
+    mano_tenant = os.getenv('OPENMANO_TENANT', None)
+    mano_host = os.getenv('OPENMANO_HOST',"localhost")
+    mano_port = os.getenv('OPENMANO_PORT',"9090")
+    mano_datacenter = os.getenv('OPENMANO_DATACENTER',None)
+    # WIM env variable for default WIM
+    mano_wim = os.getenv('OPENMANO_WIM', None)
+
+    main_parser = ThrowingArgumentParser(description='User program to interact with OPENMANO-SERVER (openmanod)')
+    main_parser.add_argument('--version', action='version', help="get version of this client",
+                            version='%(prog)s client version ' + __version__ +
+                                    " (Note: use '%(prog)s version' to get server version)")
+
+    subparsers = main_parser.add_subparsers(help='commands')
+
+    parent_parser = argparse.ArgumentParser(add_help=False)
+    parent_parser.add_argument('--verbose', '-v', action='count', help="increase verbosity level. Use several times")
+    parent_parser.add_argument('--debug', '-d', action='store_true', help="show debug information")
+
+    config_parser = subparsers.add_parser('config', parents=[parent_parser], help="prints configuration values")
+    config_parser.add_argument("-n", action="store_true", help="resolves tenant and datacenter names")
+    config_parser.set_defaults(func=config)
+
+    version_parser = subparsers.add_parser('version', parents=[parent_parser], help="get server version")
+    version_parser.set_defaults(func=version)
+
+    vnf_create_parser = subparsers.add_parser('vnf-create', parents=[parent_parser], help="adds a vnf into the catalogue")
+    vnf_create_parser.add_argument("file", action="store", help="location of the JSON file describing the VNF").completer = FilesCompleter
+    vnf_create_parser.add_argument("--name", action="store", help="name of the VNF (if it exists in the VNF descriptor, it is overwritten)")
+    vnf_create_parser.add_argument("--description", action="store", help="description of the VNF (if it exists in the VNF descriptor, it is overwritten)")
+    vnf_create_parser.add_argument("--image-path", action="store",  help="change image path locations (overwritten)")
+    vnf_create_parser.add_argument("--image-name", action="store",  help="change image name (overwritten)")
+    vnf_create_parser.add_argument("--image-checksum", action="store",  help="change image checksum (overwritten)")
+    vnf_create_parser.set_defaults(func=vnf_create)
+
+    vnf_list_parser = subparsers.add_parser('vnf-list', parents=[parent_parser], help="lists information about a vnf")
+    vnf_list_parser.add_argument("name", nargs='?', help="name of the VNF")
+    vnf_list_parser.add_argument("-a", "--all", action="store_true", help="shows all vnfs, not only the owned or public ones")
+    #vnf_list_parser.add_argument('--descriptor', help="prints the VNF descriptor", action="store_true")
+    vnf_list_parser.set_defaults(func=vnf_list)
+
+    vnf_delete_parser = subparsers.add_parser('vnf-delete', parents=[parent_parser], help="deletes a vnf from the catalogue")
+    vnf_delete_parser.add_argument("name", action="store", help="name or uuid of the VNF to be deleted")
+    vnf_delete_parser.add_argument("-f", "--force", action="store_true", help="forces deletion without asking")
+    vnf_delete_parser.add_argument("-a", "--all", action="store_true", help="allow delete not owned or privated one")
+    vnf_delete_parser.set_defaults(func=vnf_delete)
+
+    scenario_create_parser = subparsers.add_parser('scenario-create', parents=[parent_parser], help="adds a scenario into the OPENMANO DB")
+    scenario_create_parser.add_argument("file", action="store", help="location of the YAML file describing the scenario").completer = FilesCompleter
+    scenario_create_parser.add_argument("--name", action="store", help="name of the scenario (if it exists in the YAML scenario, it is overwritten)")
+    scenario_create_parser.add_argument("--description", action="store", help="description of the scenario (if it exists in the YAML scenario, it is overwritten)")
+    scenario_create_parser.set_defaults(func=scenario_create)
+
+    scenario_list_parser = subparsers.add_parser('scenario-list', parents=[parent_parser], help="lists information about a scenario")
+    scenario_list_parser.add_argument("name", nargs='?', help="name of the scenario")
+    #scenario_list_parser.add_argument('--descriptor', help="prints the scenario descriptor", action="store_true")
+    scenario_list_parser.add_argument("-a", "--all", action="store_true", help="shows all scenarios, not only the owned or public ones")
+    scenario_list_parser.set_defaults(func=scenario_list)
+
+    scenario_delete_parser = subparsers.add_parser('scenario-delete', parents=[parent_parser], help="deletes a scenario from the OPENMANO DB")
+    scenario_delete_parser.add_argument("name", action="store", help="name or uuid of the scenario to be deleted")
+    scenario_delete_parser.add_argument("-f", "--force", action="store_true", help="forces deletion without asking")
+    scenario_delete_parser.add_argument("-a", "--all", action="store_true", help="allow delete not owned or privated one")
+    scenario_delete_parser.set_defaults(func=scenario_delete)
+
+    scenario_deploy_parser = subparsers.add_parser('scenario-deploy', parents=[parent_parser], help="deploys a scenario")
+    scenario_deploy_parser.add_argument("scenario", action="store", help="name or uuid of the scenario to be deployed")
+    scenario_deploy_parser.add_argument("name", action="store", help="name of the instance")
+    scenario_deploy_parser.add_argument("--nostart", action="store_true", help="does not start the vms, just reserve resources")
+    scenario_deploy_parser.add_argument("--datacenter", action="store", help="specifies the datacenter. Needed if several datacenters are available")
+    scenario_deploy_parser.add_argument("--description", action="store", help="description of the instance")
+    scenario_deploy_parser.set_defaults(func=scenario_deploy)
+
+    scenario_deploy_parser = subparsers.add_parser('scenario-verify', help="verifies if a scenario can be deployed (deploys it and deletes it)")
+    scenario_deploy_parser.add_argument("scenario", action="store", help="name or uuid of the scenario to be verified")
+    scenario_deploy_parser.add_argument('--debug', '-d', action='store_true', help="show debug information")
+    scenario_deploy_parser.set_defaults(func=scenario_verify)
+
+    instance_scenario_create_parser = subparsers.add_parser('instance-scenario-create', parents=[parent_parser], help="deploys a scenario")
+    instance_scenario_create_parser.add_argument("file", nargs='?', help="descriptor of the instance. Must be a file or yaml/json text")
+    instance_scenario_create_parser.add_argument("--scenario", action="store", help="name or uuid of the scenario to be deployed")
+    instance_scenario_create_parser.add_argument("--name", action="store", help="name of the instance")
+    instance_scenario_create_parser.add_argument("--nostart", action="store_true", help="does not start the vms, just reserve resources")
+    instance_scenario_create_parser.add_argument("--datacenter", action="store", help="specifies the datacenter. Needed if several datacenters are available")
+    instance_scenario_create_parser.add_argument("--netmap-use", action="append", type=str, dest="netmap_use", help="indicates a datacenter network to map a scenario network 'scenario-network=datacenter-network'. Can be used several times")
+    instance_scenario_create_parser.add_argument("--netmap-create", action="append", type=str, dest="netmap_create", help="the scenario network must be created at datacenter 'scenario-network[=datacenter-network-name]' . Can be used several times")
+    instance_scenario_create_parser.add_argument("--keypair", action="append", type=str, dest="keypair", help="public key for ssh access. Format '[user:]key1[,key2...]'. Can be used several times")
+    instance_scenario_create_parser.add_argument("--keypair-auto", action="store_true", dest="keypair_auto", help="Inject the user ssh-keys found at $HOME/.ssh directory")
+    instance_scenario_create_parser.add_argument("--description", action="store", help="description of the instance")
+    instance_scenario_create_parser.set_defaults(func=instance_create)
+
+    instance_scenario_list_parser = subparsers.add_parser('instance-scenario-list', parents=[parent_parser], help="lists information about a scenario instance")
+    instance_scenario_list_parser.add_argument("name", nargs='?', help="name of the scenario instance")
+    instance_scenario_list_parser.add_argument("-a", "--all", action="store_true", help="shows all instance-scenarios, not only the owned")
+    instance_scenario_list_parser.set_defaults(func=instance_scenario_list)
+
+    instance_scenario_delete_parser = subparsers.add_parser('instance-scenario-delete', parents=[parent_parser], help="deletes a scenario instance (and deletes all VM and net instances in VIM)")
+    instance_scenario_delete_parser.add_argument("name", action="store", help="name or uuid of the scenario instance to be deleted")
+    instance_scenario_delete_parser.add_argument("-f", "--force", action="store_true", help="forces deletion without asking")
+    instance_scenario_delete_parser.add_argument("-a", "--all", action="store_true", help="allow delete not owned or privated one")
+    instance_scenario_delete_parser.set_defaults(func=instance_scenario_delete)
+
+    instance_scenario_action_parser = subparsers.add_parser('instance-scenario-action', parents=[parent_parser], help="invoke an action over part or the whole scenario instance")
+    instance_scenario_action_parser.add_argument("name", action="store", help="name or uuid of the scenario instance")
+    instance_scenario_action_parser.add_argument("action", action="store", type=str, \
+            choices=["start","pause","resume","shutoff","shutdown","forceOff","rebuild","reboot", "console", "add_public_key","vdu-scaling"],\
+            help="action to send")
+    instance_scenario_action_parser.add_argument("param", nargs='?', help="addional param of the action. e.g. console: novnc; reboot: type; vdu-scaling: '[{vdu-id: xxx, type: create|delete, count: 1}]'")
+    instance_scenario_action_parser.add_argument("--vnf", action="append", help="VNF to act on (can use several entries)")
+    instance_scenario_action_parser.add_argument("--vm", action="append", help="VM to act on (can use several entries)")
+    instance_scenario_action_parser.set_defaults(func=instance_scenario_action)
+
+    action_parser = subparsers.add_parser('action-list', parents=[parent_parser], help="get action over an instance status")
+    action_parser.add_argument("id", nargs='?', action="store", help="action id")
+    action_parser.add_argument("--instance", action="store", help="fitler by this instance_id")
+    action_parser.add_argument("--all", action="store", help="Not filter by tenant")
+    action_parser.set_defaults(func=get_action)
+
+    #instance_scenario_status_parser = subparsers.add_parser('instance-scenario-status', help="show the status of a scenario instance")
+    #instance_scenario_status_parser.add_argument("name", action="store", help="name or uuid of the scenario instance")
+    #instance_scenario_status_parser.set_defaults(func=instance_scenario_status)
+
+    tenant_create_parser = subparsers.add_parser('tenant-create', parents=[parent_parser], help="creates a new tenant")
+    tenant_create_parser.add_argument("name", action="store", help="name for the tenant")
+    tenant_create_parser.add_argument("--description", action="store", help="description of the tenant")
+    tenant_create_parser.set_defaults(func=tenant_create)
+
+    tenant_delete_parser = subparsers.add_parser('tenant-delete', parents=[parent_parser], help="deletes a tenant from the catalogue")
+    tenant_delete_parser.add_argument("name", action="store", help="name or uuid of the tenant to be deleted")
+    tenant_delete_parser.add_argument("-f", "--force", action="store_true", help="forces deletion without asking")
+    tenant_delete_parser.set_defaults(func=tenant_delete)
+
+    tenant_list_parser = subparsers.add_parser('tenant-list', parents=[parent_parser], help="lists information about a tenant")
+    tenant_list_parser.add_argument("name", nargs='?', help="name or uuid of the tenant")
+    tenant_list_parser.set_defaults(func=tenant_list)
+
+    element_edit_parser = subparsers.add_parser('tenant-edit', parents=[parent_parser], help="edits one tenant")
+    element_edit_parser.add_argument("name", help="name or uuid of the tenant")
+    element_edit_parser.add_argument("file", help="json/yaml text or file with the changes").completer = FilesCompleter
+    element_edit_parser.add_argument("-f","--force", action="store_true", help="do not prompt for confirmation")
+    element_edit_parser.set_defaults(func=element_edit, element='tenants')
+
+    datacenter_create_parser = subparsers.add_parser('datacenter-create', parents=[parent_parser], help="creates a new datacenter")
+    datacenter_create_parser.add_argument("name", action="store", help="name for the datacenter")
+    datacenter_create_parser.add_argument("url", action="store", help="url for the datacenter")
+    datacenter_create_parser.add_argument("--url_admin", action="store", help="url for administration for the datacenter")
+    datacenter_create_parser.add_argument("--type", action="store", help="datacenter type: openstack or openvim (default)")
+    datacenter_create_parser.add_argument("--config", action="store", help="aditional configuration in json/yaml format")
+    datacenter_create_parser.add_argument("--description", action="store", help="description of the datacenter")
+    datacenter_create_parser.add_argument("--sdn-controller", action="store", help="Name or uuid of the SDN controller to be used", dest='sdn_controller')
+    datacenter_create_parser.set_defaults(func=datacenter_create)
+
+    datacenter_delete_parser = subparsers.add_parser('datacenter-delete', parents=[parent_parser], help="deletes a datacenter from the catalogue")
+    datacenter_delete_parser.add_argument("name", action="store", help="name or uuid of the datacenter to be deleted")
+    datacenter_delete_parser.add_argument("-f", "--force", action="store_true", help="forces deletion without asking")
+    datacenter_delete_parser.set_defaults(func=datacenter_delete)
+
+    datacenter_edit_parser = subparsers.add_parser('datacenter-edit', parents=[parent_parser], help="Edit datacenter")
+    datacenter_edit_parser.add_argument("name", help="name or uuid of the datacenter")
+    datacenter_edit_parser.add_argument("--file", help="json/yaml text or file with the changes").completer = FilesCompleter
+    datacenter_edit_parser.add_argument("--sdn-controller", action="store",
+                                          help="Name or uuid of the SDN controller to be used. Specify 'null' to clear entry", dest='sdn_controller')
+    datacenter_edit_parser.add_argument("-f", "--force", action="store_true", help="do not prompt for confirmation")
+    datacenter_edit_parser.set_defaults(func=datacenter_edit)
+
+    datacenter_list_parser = subparsers.add_parser('datacenter-list', parents=[parent_parser], help="lists information about a datacenter")
+    datacenter_list_parser.add_argument("name", nargs='?', help="name or uuid of the datacenter")
+    datacenter_list_parser.add_argument("-a", "--all", action="store_true", help="shows all datacenters, not only datacenters attached to tenant")
+    datacenter_list_parser.set_defaults(func=datacenter_list)
+
+    datacenter_attach_parser = subparsers.add_parser('datacenter-attach', parents=[parent_parser], help="associates a datacenter to the operating tenant")
+    datacenter_attach_parser.add_argument("name", help="name or uuid of the datacenter")
+    datacenter_attach_parser.add_argument('--vim-tenant-id', action='store', help="specify a datacenter tenant to use. A new one is created by default")
+    datacenter_attach_parser.add_argument('--vim-tenant-name', action='store', help="specify a datacenter tenant name.")
+    datacenter_attach_parser.add_argument("--user", action="store", help="user credentials for the datacenter")
+    datacenter_attach_parser.add_argument("--password", action="store", help="password credentials for the datacenter")
+    datacenter_attach_parser.add_argument("--config", action="store", help="aditional configuration in json/yaml format")
+    datacenter_attach_parser.set_defaults(func=datacenter_attach)
+
+    datacenter_edit_vim_tenant_parser = subparsers.add_parser('datacenter-edit-vim-tenant', parents=[parent_parser],
+                                                     help="Edit the association of a datacenter to the operating tenant")
+    datacenter_edit_vim_tenant_parser.add_argument("name", help="name or uuid of the datacenter")
+    datacenter_edit_vim_tenant_parser.add_argument('--vim-tenant-id', action='store',
+                                          help="specify a datacenter tenant to use. A new one is created by default")
+    datacenter_edit_vim_tenant_parser.add_argument('--vim-tenant-name', action='store', help="specify a datacenter tenant name.")
+    datacenter_edit_vim_tenant_parser.add_argument("--user", action="store", help="user credentials for the datacenter")
+    datacenter_edit_vim_tenant_parser.add_argument("--password", action="store", help="password credentials for the datacenter")
+    datacenter_edit_vim_tenant_parser.add_argument("--config", action="store",
+                                          help="aditional configuration in json/yaml format")
+    datacenter_edit_vim_tenant_parser.set_defaults(func=datacenter_edit_vim_tenant)
+
+    datacenter_detach_parser = subparsers.add_parser('datacenter-detach', parents=[parent_parser], help="removes the association between a datacenter and the operating tenant")
+    datacenter_detach_parser.add_argument("name", help="name or uuid of the datacenter")
+    datacenter_detach_parser.add_argument("-a", "--all", action="store_true", help="removes all associations from this datacenter")
+    datacenter_detach_parser.set_defaults(func=datacenter_detach)
+
+    #=======================datacenter_sdn_port_mapping_xxx section=======================
+    #datacenter_sdn_port_mapping_set
+    datacenter_sdn_port_mapping_set_parser = subparsers.add_parser('datacenter-sdn-port-mapping-set',
+                                                                   parents=[parent_parser],
+                                                                   help="Load a file with the mapping of physical ports "
+                                                                        "and the ports of the dataplaneswitch controlled "
+                                                                        "by a datacenter")
+    datacenter_sdn_port_mapping_set_parser.add_argument("name", action="store", help="specifies the datacenter")
+    datacenter_sdn_port_mapping_set_parser.add_argument("file",
+                                                        help="json/yaml text or file with the port mapping").completer = FilesCompleter
+    datacenter_sdn_port_mapping_set_parser.add_argument("-f", "--force", action="store_true",
+                                                          help="forces overwriting without asking")
+    datacenter_sdn_port_mapping_set_parser.set_defaults(func=datacenter_sdn_port_mapping_set)
+
+    #datacenter_sdn_port_mapping_list
+    datacenter_sdn_port_mapping_list_parser = subparsers.add_parser('datacenter-sdn-port-mapping-list',
+                                                                    parents=[parent_parser],
+                                                                    help="Show the SDN port mapping in a datacenter")
+    datacenter_sdn_port_mapping_list_parser.add_argument("name", action="store", help="specifies the datacenter")
+    datacenter_sdn_port_mapping_list_parser.set_defaults(func=datacenter_sdn_port_mapping_list)
+
+    # datacenter_sdn_port_mapping_clear
+    datacenter_sdn_port_mapping_clear_parser = subparsers.add_parser('datacenter-sdn-port-mapping-clear',
+                                                                    parents=[parent_parser],
+                                                                    help="Clean the the SDN port mapping in a datacenter")
+    datacenter_sdn_port_mapping_clear_parser.add_argument("name", action="store",
+                                                         help="specifies the datacenter")
+    datacenter_sdn_port_mapping_clear_parser.add_argument("-f", "--force", action="store_true",
+                                              help="forces clearing without asking")
+    datacenter_sdn_port_mapping_clear_parser.set_defaults(func=datacenter_sdn_port_mapping_clear)
+    # =======================
+
+    # =======================sdn_controller_xxx section=======================
+    # sdn_controller_create
+    sdn_controller_create_parser = subparsers.add_parser('sdn-controller-create', parents=[parent_parser],
+                                                        help="Creates an SDN controller entity within RO")
+    sdn_controller_create_parser.add_argument("name", help="name of the SDN controller")
+    sdn_controller_create_parser.add_argument("--description", action="store", help="description of the SDN controller")
+    sdn_controller_create_parser.add_argument("--ip", action="store", help="IP of the SDN controller")
+    sdn_controller_create_parser.add_argument("--port", action="store", help="Port of the SDN controller")
+    sdn_controller_create_parser.add_argument("--dpid", action="store",
+                                             help="DPID of the dataplane switch controlled by this SDN controller")
+    sdn_controller_create_parser.add_argument("--type", action="store",
+                                             help="Specify the SDN controller type. Valid types are 'opendaylight' and 'floodlight'")
+    sdn_controller_create_parser.add_argument("--user", action="store", help="user credentials for the SDN controller")
+    sdn_controller_create_parser.add_argument("--passwd", action="store", dest='password',
+                                             help="password credentials for the SDN controller")
+    sdn_controller_create_parser.set_defaults(func=sdn_controller_create)
+
+    # sdn_controller_edit
+    sdn_controller_edit_parser = subparsers.add_parser('sdn-controller-edit', parents=[parent_parser],
+                                                        help="Update one or more options of a SDN controller")
+    sdn_controller_edit_parser.add_argument("name", help="name or uuid of the SDN controller", )
+    sdn_controller_edit_parser.add_argument("--name", action="store", help="Update the name of the SDN controller",
+                                              dest='new_name')
+    sdn_controller_edit_parser.add_argument("--description", action="store", help="description of the SDN controller")
+    sdn_controller_edit_parser.add_argument("--ip", action="store", help="IP of the SDN controller")
+    sdn_controller_edit_parser.add_argument("--port", action="store", help="Port of the SDN controller")
+    sdn_controller_edit_parser.add_argument("--dpid", action="store",
+                                             help="DPID of the dataplane switch controlled by this SDN controller")
+    sdn_controller_edit_parser.add_argument("--type", action="store",
+                                             help="Specify the SDN controller type. Valid types are 'opendaylight' and 'floodlight'")
+    sdn_controller_edit_parser.add_argument("--user", action="store", help="user credentials for the SDN controller")
+    sdn_controller_edit_parser.add_argument("--password", action="store",
+                                             help="password credentials for the SDN controller", dest='password')
+    sdn_controller_edit_parser.add_argument("-f", "--force", action="store_true", help="do not prompt for confirmation")
+    #TODO: include option --file
+    sdn_controller_edit_parser.set_defaults(func=sdn_controller_edit)
+
+    #sdn_controller_list
+    sdn_controller_list_parser = subparsers.add_parser('sdn-controller-list',
+                                                                    parents=[parent_parser],
+                                                                    help="List the SDN controllers")
+    sdn_controller_list_parser.add_argument("name", nargs='?', help="name or uuid of the SDN controller")
+    sdn_controller_list_parser.set_defaults(func=sdn_controller_list)
+
+    # sdn_controller_delete
+    sdn_controller_delete_parser = subparsers.add_parser('sdn-controller-delete',
+                                                                    parents=[parent_parser],
+                                                                    help="Delete the the SDN controller")
+    sdn_controller_delete_parser.add_argument("name", help="name or uuid of the SDN controller")
+    sdn_controller_delete_parser.add_argument("-f", "--force", action="store_true", help="forces deletion without asking")
+    sdn_controller_delete_parser.set_defaults(func=sdn_controller_delete)
+    # =======================
+
+    # WIM ======================= WIM section==================
+
+    # WIM create
+    wim_create_parser = subparsers.add_parser('wim-create',
+                                              parents=[parent_parser], help="creates a new wim")
+    wim_create_parser.add_argument("name", action="store",
+                                   help="name for the wim")
+    wim_create_parser.add_argument("url", action="store",
+                                   help="url for the wim")
+    wim_create_parser.add_argument("--type", action="store",
+                                   help="wim type: tapi, onos, dynpac or odl (default)")
+    wim_create_parser.add_argument("--config", action="store",
+                                   help="additional configuration in json/yaml format")
+    wim_create_parser.add_argument("--description", action="store",
+                                   help="description of the wim")
+    wim_create_parser.set_defaults(func=wim_create)
+
+    # WIM delete
+    wim_delete_parser = subparsers.add_parser('wim-delete',
+                                              parents=[parent_parser], help="deletes a wim from the catalogue")
+    wim_delete_parser.add_argument("name", action="store",
+                                   help="name or uuid of the wim to be deleted")
+    wim_delete_parser.add_argument("-f", "--force", action="store_true",
+                                   help="forces deletion without asking")
+    wim_delete_parser.set_defaults(func=wim_delete)
+
+    # WIM edit
+    wim_edit_parser = subparsers.add_parser('wim-edit',
+                                            parents=[parent_parser], help="edits a wim")
+    wim_edit_parser.add_argument("name", help="name or uuid of the wim")
+    wim_edit_parser.add_argument("--file",
+                                 help="json/yaml text or file with the changes")\
+                                .completer = FilesCompleter
+    wim_edit_parser.add_argument("-f", "--force", action="store_true",
+                                 help="do not prompt for confirmation")
+    wim_edit_parser.set_defaults(func=wim_edit)
+
+    # WIM list
+    wim_list_parser = subparsers.add_parser('wim-list',
+                                            parents=[parent_parser],
+                                            help="lists information about registered wims")
+    wim_list_parser.add_argument("name", nargs='?',
+                                 help="name or uuid of the wim")
+    wim_list_parser.add_argument("-a", "--all", action="store_true",
+                                 help="shows all wims, not only wims attached to tenant")
+    wim_list_parser.set_defaults(func=wim_list)
+
+    # WIM account create
+    wim_attach_parser = subparsers.add_parser('wim-account-create', parents=
+    [parent_parser], help="associates a wim account to the operating tenant")
+    wim_attach_parser.add_argument("name", help="name or uuid of the wim")
+    wim_attach_parser.add_argument('--account-name', action='store',
+                                   help="specify a name for the wim account.")
+    wim_attach_parser.add_argument("--user", action="store",
+                                   help="user credentials for the wim account")
+    wim_attach_parser.add_argument("--password", action="store",
+                                   help="password credentials for the wim account")
+    wim_attach_parser.add_argument("--config", action="store",
+                                   help="additional configuration in json/yaml format")
+    wim_attach_parser.set_defaults(func=wim_account_create)
+
+    # WIM account delete
+    wim_detach_parser = subparsers.add_parser('wim-account-delete',
+                                        parents=[parent_parser],
+                                        help="removes the association "
+                                                "between a wim account and the operating tenant")
+    wim_detach_parser.add_argument("name", help="name or uuid of the wim")
+    wim_detach_parser.add_argument("-a", "--all", action="store_true",
+                                   help="removes all associations from this wim")
+    wim_detach_parser.add_argument("-f", "--force", action="store_true",
+                                   help="forces delete without asking")
+    wim_detach_parser.set_defaults(func=wim_account_delete)
+
+    # WIM account edit
+    wim_attach_edit_parser = subparsers.add_parser('wim-account-edit', parents=
+    [parent_parser], help="modifies the association of a wim account to the operating tenant")
+    wim_attach_edit_parser.add_argument("name", help="name or uuid of the wim")
+    wim_attach_edit_parser.add_argument('--account-name', action='store',
+                                   help="specify a name for the wim account.")
+    wim_attach_edit_parser.add_argument("--user", action="store",
+                                   help="user credentials for the wim account")
+    wim_attach_edit_parser.add_argument("--password", action="store",
+                                   help="password credentials for the wim account")
+    wim_attach_edit_parser.add_argument("--config", action="store",
+                                   help="additional configuration in json/yaml format")
+    wim_attach_edit_parser.set_defaults(func=wim_account_edit)
+
+    # WIM port mapping set
+    wim_port_mapping_set_parser = subparsers.add_parser('wim-port-mapping-set',
+                                                        parents=[parent_parser],
+                                                        help="Load a file with the mappings "
+                                                                "of ports of a WAN switch that is "
+                                                                "connected to a PoP and the ports "
+                                                                "of the switch controlled by the PoP")
+    wim_port_mapping_set_parser.add_argument("name", action="store",
+                                             help="specifies the wim")
+    wim_port_mapping_set_parser.add_argument("file",
+                                             help="json/yaml text or file with the wim port mapping")\
+        .completer = FilesCompleter
+    wim_port_mapping_set_parser.add_argument("-f", "--force",
+                                             action="store_true", help="forces overwriting without asking")
+    wim_port_mapping_set_parser.set_defaults(func=wim_port_mapping_set)
+
+    # WIM port mapping list
+    wim_port_mapping_list_parser = subparsers.add_parser('wim-port-mapping-list',
+            parents=[parent_parser], help="Show the port mappings for a wim")
+    wim_port_mapping_list_parser.add_argument("name", action="store",
+                                              help="specifies the wim")
+    wim_port_mapping_list_parser.set_defaults(func=wim_port_mapping_list)
+
+    # WIM port mapping clear
+    wim_port_mapping_clear_parser = subparsers.add_parser('wim-port-mapping-clear',
+            parents=[parent_parser], help="Clean the port mapping in a wim")
+    wim_port_mapping_clear_parser.add_argument("name", action="store",
+                                               help="specifies the wim")
+    wim_port_mapping_clear_parser.add_argument("-f", "--force",
+                                               action="store_true",
+                                               help="forces clearing without asking")
+    wim_port_mapping_clear_parser.set_defaults(func=wim_port_mapping_clear)
+
+    # =======================================================
+
+    action_dict={'net-update': 'retrieves external networks from datacenter',
+                 'net-edit': 'edits an external network',
+                 'net-delete': 'deletes an external network',
+                 'net-list': 'lists external networks from a datacenter'
+                 }
+    for item in action_dict:
+        datacenter_action_parser = subparsers.add_parser('datacenter-'+item, parents=[parent_parser], help=action_dict[item])
+        datacenter_action_parser.add_argument("datacenter", help="name or uuid of the datacenter")
+        if item=='net-edit' or item=='net-delete':
+            datacenter_action_parser.add_argument("net", help="name or uuid of the datacenter net")
+        if item=='net-edit':
+            datacenter_action_parser.add_argument("file", help="json/yaml text or file with the changes").completer = FilesCompleter
+        if item!='net-list':
+            datacenter_action_parser.add_argument("-f","--force", action="store_true", help="do not prompt for confirmation")
+        datacenter_action_parser.set_defaults(func=datacenter_net_action, action=item)
+
+
+    action_dict={'netmap-import': 'create network senario netmap base on the datacenter networks',
+                 'netmap-create': 'create a new network senario netmap',
+                 'netmap-edit':   'edit name of a network senario netmap',
+                 'netmap-delete': 'deletes a network scenario netmap (--all for clearing all)',
+                 'netmap-list':   'list/show network scenario netmaps'
+                 }
+    for item in action_dict:
+        datacenter_action_parser = subparsers.add_parser('datacenter-'+item, parents=[parent_parser], help=action_dict[item])
+        datacenter_action_parser.add_argument("--datacenter", help="name or uuid of the datacenter")
+        #if item=='net-add':
+        #    datacenter_action_parser.add_argument("net", help="name of the network")
+        if item=='netmap-delete':
+            datacenter_action_parser.add_argument("netmap", nargs='?',help="name or uuid of the datacenter netmap to delete")
+            datacenter_action_parser.add_argument("--all", action="store_true", help="delete all netmap of this datacenter")
+            datacenter_action_parser.add_argument("-f","--force", action="store_true", help="do not prompt for confirmation")
+        if item=='netmap-edit':
+            datacenter_action_parser.add_argument("netmap", help="name or uuid of the datacenter netmap do edit")
+            datacenter_action_parser.add_argument("file", nargs='?', help="json/yaml text or file with the changes").completer = FilesCompleter
+            datacenter_action_parser.add_argument("--name", action='store', help="name to assign to the datacenter netmap")
+            datacenter_action_parser.add_argument('--vim-id', action='store', help="specify vim network uuid")
+            datacenter_action_parser.add_argument("-f","--force", action="store_true", help="do not prompt for confirmation")
+        if item=='netmap-list':
+            datacenter_action_parser.add_argument("netmap", nargs='?',help="name or uuid of the datacenter netmap to show")
+        if item=='netmap-create':
+            datacenter_action_parser.add_argument("file", nargs='?', help="json/yaml text or file descriptor with the changes").completer = FilesCompleter
+            datacenter_action_parser.add_argument("--name", action='store', help="name to assign to the datacenter netmap, by default same as vim-name")
+            datacenter_action_parser.add_argument('--vim-id', action='store', help="specify vim network uuid")
+            datacenter_action_parser.add_argument('--vim-name', action='store', help="specify vim network name")
+        if item=='netmap-import':
+            datacenter_action_parser.add_argument("-f","--force", action="store_true", help="do not prompt for confirmation")
+        datacenter_action_parser.set_defaults(func=datacenter_netmap_action, action=item)
+
+    # =======================vim_net_sdn_xxx section=======================
+    # vim_net_sdn_attach
+    vim_net_sdn_attach_parser = subparsers.add_parser('vim-net-sdn-attach',
+                                                      parents=[parent_parser],
+                                                      help="Specify the port to access to an external network using SDN")
+    vim_net_sdn_attach_parser.add_argument("vim_net", action="store",
+                                                help="Name/id of the network in the vim that will be used to connect to the external network")
+    vim_net_sdn_attach_parser.add_argument("port", action="store", help="Specifies the port in the dataplane switch to access to the external network")
+    vim_net_sdn_attach_parser.add_argument("--vlan", action="store", help="Specifies the vlan (if any) to use in the defined port")
+    vim_net_sdn_attach_parser.add_argument("--mac", action="store", help="Specifies the MAC (if known) of the physical device that will be reachable by this external port")
+    vim_net_sdn_attach_parser.add_argument("--datacenter", action="store", help="specifies the datacenter")
+    vim_net_sdn_attach_parser.set_defaults(func=vim_net_sdn_attach)
+
+    # vim_net_sdn_detach
+    vim_net_sdn_detach_parser = subparsers.add_parser('vim-net-sdn-detach',
+                                                           parents=[parent_parser],
+                                                           help="Remove the port information to access to an external network using SDN")
+
+    vim_net_sdn_detach_parser.add_argument("vim_net", action="store", help="Name/id of the vim network")
+    vim_net_sdn_detach_parser.add_argument("--id", action="store",help="Specify the uuid of the external ports from this network to be detached")
+    vim_net_sdn_detach_parser.add_argument("--all", action="store_true", help="Detach all external ports from this network")
+    vim_net_sdn_detach_parser.add_argument("-f", "--force", action="store_true", help="forces clearing without asking")
+    vim_net_sdn_detach_parser.add_argument("--datacenter", action="store", help="specifies the datacenter")
+    vim_net_sdn_detach_parser.set_defaults(func=vim_net_sdn_detach)
+    # =======================
+
+    for item in ("network", "tenant", "image"):
+        if item=="network":
+            command_name = 'vim-net'
+        else:
+            command_name = 'vim-'+item
+        vim_item_list_parser = subparsers.add_parser(command_name + '-list', parents=[parent_parser], help="list the vim " + item + "s")
+        vim_item_list_parser.add_argument("name", nargs='?', help="name or uuid of the " + item + "s")
+        vim_item_list_parser.add_argument("--datacenter", action="store", help="specifies the datacenter")
+        vim_item_list_parser.set_defaults(func=vim_action, item=item, action="list")
+
+        vim_item_del_parser = subparsers.add_parser(command_name + '-delete', parents=[parent_parser], help="list the vim " + item + "s")
+        vim_item_del_parser.add_argument("name", help="name or uuid of the " + item + "s")
+        vim_item_del_parser.add_argument("--datacenter", action="store", help="specifies the datacenter")
+        vim_item_del_parser.set_defaults(func=vim_action, item=item, action="delete")
+
+        if item == "network" or item == "tenant":
+            vim_item_create_parser = subparsers.add_parser(command_name + '-create', parents=[parent_parser], help="create a "+item+" at vim")
+            vim_item_create_parser.add_argument("file", nargs='?', help="descriptor of the {}. Must be a file or yaml/json text".format(item)).completer = FilesCompleter
+            vim_item_create_parser.add_argument("--name", action="store", help="name of the {}".format(item))
+            vim_item_create_parser.add_argument("--datacenter", action="store", help="specifies the datacenter")
+            if item=="network":
+                vim_item_create_parser.add_argument("--type", action="store", help="type of network, data, ptp, bridge")
+                vim_item_create_parser.add_argument("--shared", action="store_true", help="Private or shared")
+                vim_item_create_parser.add_argument("--bind-net", action="store", help="For openvim datacenter type, net to be bind to, for vlan type, use sufix ':<vlan_tag>'")
+            else:
+                vim_item_create_parser.add_argument("--description", action="store", help="description of the {}".format(item))
+            vim_item_create_parser.set_defaults(func=vim_action, item=item, action="create")
+
+    argcomplete.autocomplete(main_parser)
+
+    try:
+        args = main_parser.parse_args()
+        #logging info
+        level = logging.CRITICAL
+        streamformat = "%(asctime)s %(name)s %(levelname)s: %(message)s"
+        if "debug" in args and args.debug:
+            level = logging.DEBUG
+        logging.basicConfig(format=streamformat, level= level)
+        logger = logging.getLogger('mano')
+        logger.setLevel(level)
+        # print("#TODO py3", args)
+        result = args.func(args)
+        if result == None:
+            result = 0
+        #for some reason it fails if call exit inside try instance. Need to call exit at the end !?
+    except (requests.exceptions.ConnectionError):
+        print("Connection error: not possible to contact OPENMANO-SERVER (openmanod)")
+        result = -2
+    except (KeyboardInterrupt):
+        print('Exiting openmano')
+        result = -3
+    except (SystemExit, ArgumentParserError):
+        result = -4
+    except (AttributeError):
+        print("Type '--help' for more information")
+        result = -4
+    except OpenmanoCLIError as e:
+        # print("#TODO py3", e)
+        print(e)
+        result = -5
+
+    # print(result)
+    exit(result)
+
+
# Script entry point: run the openmano CLI parser/dispatcher when executed directly.
if __name__ == '__main__':
    main()
+
diff --git a/RO-client/requirements.txt b/RO-client/requirements.txt
new file mode 100644 (file)
index 0000000..cd8e048
--- /dev/null
@@ -0,0 +1,18 @@
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+
+argcomplete
+requests==2.*
+PyYAML
+
diff --git a/RO-client/setup.py b/RO-client/setup.py
new file mode 100644 (file)
index 0000000..d1748cd
--- /dev/null
@@ -0,0 +1,58 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+# Copyright 2018 Telefonica S.A.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
# Packaging metadata for the OSM RO client ("openmano" console tool).
import os
from setuptools import setup

_name = "osm_roclient"
# version is at first line of osm_roclient/html_public/version
here = os.path.abspath(os.path.dirname(__file__))
# Long description shown on package indexes is taken verbatim from the README.
with open(os.path.join(here, 'README.rst')) as readme_file:
    README = readme_file.read()

setup(
    name=_name,
    description='OSM ro client',
    long_description=README,
    # Version is derived from the git tag at build time by setuptools-version-command
    # (declared in setup_requires below).
    version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'),
    # version=VERSION,
    # python_requires='>3.5.0',
    author='ETSI OSM',
    author_email='alfonso.tiernosepulveda@telefonica.com',
    maintainer='Alfonso Tierno',
    maintainer_email='alfonso.tiernosepulveda@telefonica.com',
    # NOTE(review): this URL points at the LCM repository, and the commented
    # data_files below reference lcm.cfg/osm-lcm.service — this file looks
    # copy-pasted from LCM's setup.py; confirm whether the URL should point
    # to the RO repository instead.
    url='https://osm.etsi.org/gitweb/?p=osm/LCM.git;a=summary',
    license='Apache 2.0',

    packages=[_name],
    include_package_data=True,
    # data_files=[('/etc/osm/', ['osm_roclient/lcm.cfg']),
    #             ('/etc/systemd/system/', ['osm_roclient/osm-lcm.service']),
    #             ],
    # Runtime dependencies; keep in sync with RO-client/requirements.txt.
    install_requires=[
        'PyYAML',
        'requests==2.*',
        'argcomplete',
    ],
    setup_requires=['setuptools-version-command'],
    # Installs the "openmano" command, dispatching to osm_roclient.roclient:main.
    entry_points={
        "console_scripts": [
            "openmano=osm_roclient.roclient:main"
        ]
    },
)
diff --git a/RO-client/stdeb.cfg b/RO-client/stdeb.cfg
new file mode 100644 (file)
index 0000000..844e87e
--- /dev/null
@@ -0,0 +1,18 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+[DEFAULT]
+X-Python3-Version : >= 3.5
+Depends3: python3-argcomplete, python3-requests, python3-yaml 
diff --git a/RO-client/tox.ini b/RO-client/tox.ini
new file mode 100644 (file)
index 0000000..a8e7c3a
--- /dev/null
@@ -0,0 +1,41 @@
+# Copyright 2018 Telefonica S.A.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+[tox]
+envlist = py3
+toxworkdir={homedir}/.tox
+
+[testenv]
+basepython = python3
+install_command = python3 -m pip install -r requirements.txt -U {opts} {packages}
+deps = -r{toxinidir}/test-requirements.txt
+commands=python3 -m unittest discover -v
+
+[testenv:flake8]
+basepython = python3
+deps = flake8
+commands = flake8 osm_roclient --max-line-length 120 \
+    --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504
+
+[testenv:unittest]
+basepython = python3
+commands = python3 -m unittest osm_roclient.tests
+
+[testenv:build]
+basepython = python3
+deps = stdeb
+       setuptools-version-command
+commands = python3 setup.py --command-packages=stdeb.command bdist_deb
+
diff --git a/RO/MANIFEST.in b/RO/MANIFEST.in
new file mode 100644 (file)
index 0000000..7251d31
--- /dev/null
@@ -0,0 +1,21 @@
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+
+#         http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+
+#include MANIFEST.in
+#include requirements.txt
+include README.rst
+include requirements.txt
+include README.rst
+recursive-include osm_ro *
+
diff --git a/RO/Makefile b/RO/Makefile
new file mode 100644 (file)
index 0000000..b41748d
--- /dev/null
@@ -0,0 +1,120 @@
+# Copyright 2018 Telefonica S.A.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.PHONY: all test clean
+
+SHELL := /bin/bash
+
+BRANCH ?= master
+
+all:  clean package
+
+clean:
+       rm -rf dist deb_dist osm_ro-*.tar.gz osm_ro.egg-info .eggs
+
+package:
+#      apt-get install -y python-stdeb
+       python3 setup.py --command-packages=stdeb.command sdist_dsc
+       cp debian/python3-osm-ro.postinst deb_dist/osm-ro*/debian/
+       cd deb_dist/osm-ro*/ && dpkg-buildpackage -rfakeroot -uc -us
+       # mkdir -p .build
+       # cp build/deb_dist/python-*.deb .build/
+
+clean_build:
+       rm -rf build
+       find osm_ro -name '*.pyc' -delete
+       find osm_ro -name '*.pyo' -delete
+
+prepare:
#      pip install --user --upgrade setuptools
+       mkdir -p build/
+#      VER1=$(shell git describe | sed -e 's/^v//' |cut -d- -f1); \
+#      VER2=$(shell git describe | cut -d- -f2); \
+#      VER3=$(shell git describe | cut -d- -f3); \
+#      echo "$$VER1.dev$$VER2+$$VER3" > build/RO_VERSION
+       cp tox.ini build/
+       cp MANIFEST.in build/
+       cp requirements.txt build/
+       cp README.rst build/
+       cp setup.py build/
+       cp stdeb.cfg build/
+       cp -r osm_ro build/
+       cp openmano build/
+       cp openmanod build/
+       cp -r vnfs build/osm_ro
+       cp -r scenarios build/osm_ro
+       cp -r instance-scenarios build/osm_ro
+       cp -r scripts build/osm_ro
+       cp -r database_utils build/osm_ro
+       cp LICENSE build/osm_ro
+
+connectors: prepare
+       # python-novaclient is required for that
+       rm -f build/osm_ro/openmanolinkervimconn.py
+       cd build/osm_ro; for i in `ls vimconn_*.py |sed "s/\.py//"` ; do echo "import $$i" >> openmanolinkervimconn.py; done
+       python build/osm_ro/openmanolinkervimconn.py 2>&1
+       rm -f build/osm_ro/openmanolinkervimconn.py
+
+build: connectors prepare
+       python -m py_compile build/osm_ro/*.py
+#      cd build && tox -e flake8
+
+lib-openvim:
+       $(shell git clone https://osm.etsi.org/gerrit/osm/openvim)
+       LIB_BRANCH=$(shell git -C openvim branch -a|grep -oP 'remotes/origin/\K$(BRANCH)'); \
+       [ -z "$$LIB_BRANCH" ] && LIB_BRANCH='master'; \
+       echo "BRANCH: $(BRANCH)"; \
+       echo "LIB_OPENVIM_BRANCH: $$LIB_BRANCH"; \
+       git -C openvim checkout $$LIB_BRANCH
+       make -C openvim clean lite
+
+osm-im:
+       $(shell git clone https://osm.etsi.org/gerrit/osm/IM)
+       make -C IM clean all
+
+snap:
+       echo "Nothing to be done yet"
+
+install: lib-openvim osm-im
+       dpkg -i IM/deb_dist/python-osm-im*.deb
+       dpkg -i openvim/.build/python-lib-osm-openvim*.deb
+       dpkg -i .build/python-osm-ro*.deb
+       cd .. && \
+       OSMLIBOVIM_PATH=`python -c 'import lib_osm_openvim; print lib_osm_openvim.__path__[0]'` || FATAL "lib-osm-openvim was not properly installed" && \
+       OSMRO_PATH=`python -c 'import osm_ro; print osm_ro.__path__[0]'` || FATAL "osm-ro was not properly installed" && \
+       USER=root DEBIAN_FRONTEND=noninteractive $$OSMRO_PATH/database_utils/install-db-server.sh --updatedb || FATAL "osm-ro db installation failed" && \
+       USER=root DEBIAN_FRONTEND=noninteractive $$OSMLIBOVIM_PATH/database_utils/install-db-server.sh -u mano -p manopw -d mano_vim_db --updatedb || FATAL "lib-osm-openvim db installation failed"
+       service osm-ro restart
+
+develop: prepare
+#      pip install -r requirements.txt
+       cd build && ./setup.py develop
+
+test:
+       . ./test/basictest.sh -f --insert-bashrc --install-openvim --init-openvim
+       . ./test/basictest.sh -f reset add-openvim
+       ./test/test_RO.py deploy -n mgmt -t osm -i cirros034 -d local-openvim --timeout=30 --failfast
+       ./test/test_RO.py vim  -t osm  -d local-openvim --timeout=30 --failfast
+
+build-docker-from-source:
+       docker build -t osm/openmano -f docker/Dockerfile-local .
+
+run-docker:
+       docker-compose -f docker/openmano-compose.yml up
+
+stop-docker:
+       docker-compose -f docker/openmano-compose.yml down
+
+
diff --git a/RO/README.rst b/RO/README.rst
new file mode 100644 (file)
index 0000000..3a2be88
--- /dev/null
@@ -0,0 +1,8 @@
+===========
+osm-ro
+===========
+
+osm-ro is the Resource Orchestrator for OSM, dealing with resource operations
+against different VIMs such as Openstack, VMware's vCloud Director, openvim
+and AWS.
+
diff --git a/RO/debian/python3-osm-ro.postinst b/RO/debian/python3-osm-ro.postinst
new file mode 100755 (executable)
index 0000000..02f356b
--- /dev/null
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: OSM_TECH@list.etsi.org
+##
+
+echo "POST INSTALL OSM-RO"
+# nothing to do
diff --git a/RO/osm_ro/__init__.py b/RO/osm_ro/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/RO/osm_ro/console_proxy_thread.py b/RO/osm_ro/console_proxy_thread.py
new file mode 100644 (file)
index 0000000..0c44899
--- /dev/null
@@ -0,0 +1,188 @@
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+'''
+Implement like a proxy for TCP/IP in a separated thread.
It creates two sockets to relay the TCP/IP packets between the fixed console
server specified at class construction (console_host, console_port)
and a client that connects against the (host, port) also specified at construction
+
+                ---------------------           -------------------------------
+                |       OPENMANO     |          |         VIM                  |
+client 1  ----> | ConsoleProxyThread | ------>  |  Console server              |
+client 2  ----> |  (host, port)      | ------>  |(console_host, console_server)|
+   ...           --------------------            ------------------------------
+'''
+__author__="Alfonso Tierno"
+__date__ ="$19-nov-2015 09:07:15$"
+
+import socket
+import select
+import threading
+import logging
+
+
class ConsoleProxyException(Exception):
    '''Base exception for any error raised by the console proxy'''
class ConsoleProxyExceptionPortUsed(ConsoleProxyException):
    '''Raised when the requested listening port is already in use'''
+
class ConsoleProxyThread(threading.Thread):
    """TCP/IP proxy between console clients and a fixed console server.

    Listens on (host, port); every accepted client connection gets a paired
    "forward" socket connected to (console_host, console_port), and bytes are
    relayed in both directions. All sockets are multiplexed in a single
    select() loop (run()). Stop the thread by setting 'terminate' to True from
    outside; the flag is polled every 'check_finish' seconds.
    """
    buffer_size = 4096
    check_finish = 1  # frequency to check if requested to end, in seconds

    def __init__(self, host, port, console_host, console_port, log_level=None):
        """Bind the listening socket and prepare bookkeeping structures.

        :param host: local address to listen on
        :param port: local port to listen on (0 lets the OS pick one)
        :param console_host: address of the console server to relay to
        :param console_port: port of the console server to relay to
        :param log_level: optional logging level name, e.g. "DEBUG"
        :raises ConsoleProxyExceptionPortUsed: if (host, port) is already bound
        :raises ConsoleProxyException: for any other socket error
        """
        try:
            threading.Thread.__init__(self)
            self.console_host = console_host
            self.console_port = console_port
            self.host = host
            self.port = port
            self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            self.server.bind((host, port))
            self.server.listen(200)
            # TODO timeout in a lock section can be used to autoterminate the thread
            # when inactivity and timeout<time : set timeout=0 and terminate
            # from outside, close class when timeout==0; set timeout=time+120 when adding a new console on this thread
            # set self.timeout = time.time() + 120 at init
            self.name = "ConsoleProxy " + console_host + ":" + str(console_port)
            self.input_list = [self.server]
            # maps each socket (client or forward) to its connection info dict;
            # every proxied pair has two entries pointing at the same dict
            self.channel = {}
            self.terminate = False  # put at True from outside to force termination
            self.logger = logging.getLogger('openmano.console')
            if log_level:
                self.logger.setLevel(getattr(logging, log_level))

        except (socket.error, socket.herror, socket.gaierror, socket.timeout) as e:
            # BUGFIX: the original tested "e is socket.error", which compares an
            # exception *instance* with the class and is always False, so the
            # specific port-in-use exception was never raised. Use isinstance().
            if isinstance(e, socket.error) and e.errno == 98:  # 98 == EADDRINUSE on Linux
                raise ConsoleProxyExceptionPortUsed("socket.error " + str(e))
            raise ConsoleProxyException(type(e).__name__ + ": " + (str(e) if len(e.args) == 0 else str(e.args[0])))

    def run(self):
        """Main loop: wait for readable sockets and dispatch accept/recv."""
        while True:
            try:
                inputready, _, _ = select.select(self.input_list, [], [], self.check_finish)
            except select.error as e:
                self.logger.error("Exception on select %s: %s", type(e).__name__, str(e))
                self.on_terminate()
                # BUGFIX: the original fell through here and used the unbound
                # 'inputready' variable (NameError); everything is already
                # closed by on_terminate(), so leave the loop.
                break

            if self.terminate:
                self.on_terminate()
                self.logger.debug("Terminate because commanded")
                break

            for sock in inputready:
                if sock == self.server:
                    self.on_accept()
                else:
                    self.on_recv(sock)

    def on_terminate(self):
        """Close the listening socket and every proxied connection."""
        while self.input_list:
            if self.input_list[0] is self.server:
                self.server.close()
                del self.input_list[0]
            else:
                # on_close removes both ends of the pair from input_list
                self.on_close(self.input_list[0], "Terminating thread")

    def on_accept(self):
        """Accept a new client and open the paired forward connection.

        :return: True if the pair was established, False otherwise
        """
        # accept
        try:
            clientsock, clientaddr = self.server.accept()
        except (socket.error, socket.herror, socket.gaierror, socket.timeout) as e:
            self.logger.error("Exception on_accept %s: %s", type(e).__name__, str(e))
            return False

        # connect to the console server on behalf of this client
        try:
            forward = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            forward.connect((self.console_host, self.console_port))
            # human-readable description of the full relay path, for logging
            name = "{}:{} => ({}:{} => {}:{}) => {}:{}".format(
                *clientsock.getpeername(), *clientsock.getsockname(), *forward.getsockname(), *forward.getpeername() )
            self.logger.warning("new connection " + name)

            self.input_list.append(clientsock)
            self.input_list.append(forward)
            info = { "name": name,
                    "clientsock" : clientsock,
                    "serversock" : forward
                    }
            # both sockets map to the same info dict so either side can find its peer
            self.channel[clientsock] = info
            self.channel[forward] = info
            return True
        except (socket.error, socket.herror, socket.gaierror, socket.timeout) as e:
            self.logger.error("Exception on_connect to server %s:%d; %s: %s  Close client side %s",
                self.console_host, self.console_port, type(e).__name__, str(e), str(clientaddr) )
            clientsock.close()
            return False

    def on_close(self, sock, cause):
        """Tear down both ends of the connection pair that 'sock' belongs to.

        :param sock: either the client or the forward socket of the pair
        :param cause: text logged as the reason for closing
        """
        if sock not in self.channel:
            return  # can happen if there is data ready to be received at both sides and the channel has been deleted. QUITE IMPROBABLE but just in case
        info = self.channel[sock]
        # debug info
        sockname = "client" if sock is info["clientsock"] else "server"
        self.logger.warning("del connection %s %s at %s side", info["name"], str(cause), str(sockname))
        # close sockets
        try:
            # close the connection with client
            info["clientsock"].close()  # equivalent to do self.s.close()
        except (socket.error, socket.herror, socket.gaierror, socket.timeout) as e:
            self.logger.error("Exception on_close client socket %s: %s", type(e).__name__, str(e))
        try:
            # close the connection with remote server
            info["serversock"].close()
        except (socket.error, socket.herror, socket.gaierror, socket.timeout) as e:
            self.logger.error("Exception on_close server socket %s: %s", type(e).__name__, str(e))

        # remove objects from input_list
        self.input_list.remove(info["clientsock"])
        self.input_list.remove(info["serversock"])
        # delete both objects from channel dict
        del self.channel[info["clientsock"]]
        del self.channel[info["serversock"]]

    def on_recv(self, sock):
        """Relay data received on 'sock' to its peer; close the pair on EOF or error."""
        if sock not in self.channel:
            return  # can happen if there is data ready to be received at both sides and the channel has been deleted. QUITE IMPROBABLE but just in case
        info = self.channel[sock]
        peersock = info["serversock"] if sock is info["clientsock"] else info["clientsock"]
        try:
            data = sock.recv(self.buffer_size)
            if len(data) == 0:
                self.on_close(sock, "peer closed")
            else:
                # point 'sock' at the peer so a send() failure below reports/closes that side
                sock = peersock
                peersock.send(data)
        except (socket.error, socket.herror, socket.gaierror, socket.timeout) as e:
            self.on_close(sock, "Exception {}: {}".format(type(e).__name__, e))

    # def start_timeout(self):
    #    self.timeout = time.time() + 120
diff --git a/RO/osm_ro/database_utils/dump_db.sh b/RO/osm_ro/database_utils/dump_db.sh
new file mode 100755 (executable)
index 0000000..89c83f0
--- /dev/null
@@ -0,0 +1,147 @@
+#!/bin/bash
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+
+LICENSE_HEAD='/**
+* Copyright 2017 Telefonica Investigacion y Desarrollo, S.A.U.
+* This file is part of openmano
+* All Rights Reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License"); you may
+* not use this file except in compliance with the License. You may obtain
+* a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+* License for the specific language governing permissions and limitations
+* under the License.
+*
+* For those usages not covered by the Apache License, Version 2.0 please
+* contact with: nfvlabs@tid.es
+**/
+'
+
# Default DB connection parameters; all can be overridden via command-line options.
DBUSER="mano"
DBPASS=""
DBHOST="localhost"
DBPORT="3306"
DBNAME="mano_db"
# Detect paths
MYSQL=$(which mysql)
AWK=$(which awk)
GREP=$(which grep)
DIRNAME=`dirname $(readlink -f $0)`

# Print command-line help.
function usage(){
    echo -e "Usage: $0 OPTIONS"
    echo -e "  Dumps openmano database content"
    echo -e "  OPTIONS"
    echo -e "     -u USER  database user. '$DBUSER' by default. Prompts if DB access fails"
    echo -e "     -p PASS  database password. 'No password' by default. Prompts if DB access fails"
    echo -e "     -P PORT  database port. '$DBPORT' by default"
    echo -e "     -h HOST  database host. '$DBHOST' by default"
    echo -e "     -d NAME  database name. '$DBNAME' by default.  Prompts if DB access fails"
    echo -e "     --help   shows this help"
}

# Parse options. BUGFIX: the original optstring ":u:p:P:h:-:" was missing "d:",
# so the documented '-d NAME' option was rejected as invalid even though a
# 'd)' case exists below. The trailing "-:" enables the long option --help.
while getopts ":u:p:P:h:d:-:" o; do
    case "${o}" in
        u)
            DBUSER="$OPTARG"
            ;;
        p)
            DBPASS="$OPTARG"
            ;;
        P)
            DBPORT="$OPTARG"
            ;;
        d)
            DBNAME="$OPTARG"
            ;;
        h)
            DBHOST="$OPTARG"
            ;;
        -)
            [ "${OPTARG}" == "help" ] && usage && exit 0
            echo "Invalid option: --$OPTARG" >&2 && usage  >&2
            exit 1
            ;;
        \?)
            echo "Invalid option: -$OPTARG" >&2 && usage  >&2
            exit 1
            ;;
        :)
            echo "Option -$OPTARG requires an argument." >&2 && usage  >&2
            exit 1
            ;;
        *)
            usage >&2
            exit -1
            ;;
    esac
done
shift $((OPTIND-1))

# Check database access with the given credentials; on failure, prompt
# interactively for database name/user/password until a connection succeeds.
DBUSER_="-u$DBUSER"
DBPASS_=""
[ -n "$DBPASS" ] && DBPASS_="-p$DBPASS"
DBHOST_="-h$DBHOST"
DBPORT_="-P$DBPORT"
while !  echo ";" | mysql $DBHOST_ $DBPORT_ $DBUSER_ $DBPASS_ $DBNAME >/dev/null 2>&1
do
        [ -n "$logintry" ] &&  echo -e "\nInvalid database credentials!!!. Try again (Ctrl+c to abort)"
        [ -z "$logintry" ] &&  echo -e "\nProvide database name and credentials"
        read -e -p "mysql database name($DBNAME): " KK
        [ -n "$KK" ] && DBNAME="$KK"
        read -e -p "mysql user($DBUSER): " KK
        [ -n "$KK" ] && DBUSER="$KK" && DBUSER_="-u$DBUSER"
        read -e -s -p "mysql password: " DBPASS
        [ -n "$DBPASS" ] && DBPASS_="-p$DBPASS"
        [ -z "$DBPASS" ] && DBPASS_=""
        logintry="yes"
        echo
done

# Dump 1: schema only (plus the content of schema_version, needed by the
# migration scripts to know which version the structure corresponds to).
echo "$LICENSE_HEAD" > ${DIRNAME}/${DBNAME}_structure.sql
mysqldump $DBHOST_ $DBPORT_ $DBUSER_ $DBPASS_ --no-data --add-drop-table --add-drop-database --routines --databases $DBNAME >> ${DIRNAME}/${DBNAME}_structure.sql
echo -e "\n\n\n\n" >> ${DIRNAME}/${DBNAME}_structure.sql
mysqldump $DBHOST_ $DBPORT_ $DBUSER_ $DBPASS_ --no-create-info $DBNAME --tables schema_version 2>/dev/null  >> ${DIRNAME}/${DBNAME}_structure.sql
echo "    ${DIRNAME}/${DBNAME}_structure.sql"

# Dump 2: data only.
echo "$LICENSE_HEAD" > ${DIRNAME}/${DBNAME}_data.sql #copy my own header
mysqldump $DBHOST_ $DBPORT_ $DBUSER_ $DBPASS_ --no-create-info $DBNAME >> ${DIRNAME}/${DBNAME}_data.sql
echo "    ${DIRNAME}/${DBNAME}_data.sql"

# Dump 3: schema and data together.
echo "$LICENSE_HEAD" > ${DIRNAME}/${DBNAME}_all.sql #copy my own header
mysqldump $DBHOST_ $DBPORT_ $DBUSER_ $DBPASS_ --add-drop-table --add-drop-database --routines --databases $DBNAME >> ${DIRNAME}/${DBNAME}_all.sql
echo "    ${DIRNAME}/${DBNAME}_all.sql"
+
diff --git a/RO/osm_ro/database_utils/init_mano_db.sh b/RO/osm_ro/database_utils/init_mano_db.sh
new file mode 100755 (executable)
index 0000000..147ea38
--- /dev/null
@@ -0,0 +1,170 @@
+#!/bin/bash
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+DBUSER="mano"
+DBPASS=""
+DEFAULT_DBPASS="manopw"
+DBHOST=""
+DBPORT="3306"
+DBNAME="mano_db"
+QUIET_MODE=""
+CREATEDB=""
+
+# Detect paths
+MYSQL=$(which mysql)
+AWK=$(which awk)
+GREP=$(which grep)
+DIRNAME=`dirname $(readlink -f $0)`
+
+function usage(){
+    echo -e "Usage: $0 OPTIONS [version]"
+    echo -e "  Inits openmano database; deletes previous one and loads from ${DBNAME}_structure.sql"\
+    echo -e "   and data from host_ranking.sql, nets.sql, of_ports_pci_correspondece*.sql"
+            "If [version]  is not provided, it is upgraded to the last version"
+    echo -e "  OPTIONS"
+    echo -e "     -u USER  database user. '$DBUSER' by default. Prompts if DB access fails"
+    echo -e "     -p PASS  database password. If missing it tries without and '$DEFAULT_DBPASS' password before prompting"
+    echo -e "     -P PORT  database port. '$DBPORT' by default"
+    echo -e "     -h HOST  database host. 'localhost' by default"
+    echo -e "     -d NAME  database name. '$DBNAME' by default.  Prompts if DB access fails"
+    echo -e "     -q --quiet: Do not prompt for credentials and exit if cannot access to database"
+    echo -e "     --createdb   forces the deletion and creation of the database"
+    echo -e "     --help   shows this help"
+}
+
+while getopts ":u:p:P:h:d:q-:" o; do
+    case "${o}" in
+        u)
+            DBUSER="$OPTARG"
+            ;;
+        p)
+            DBPASS="$OPTARG"
+            ;;
+        P)
+            DBPORT="$OPTARG"
+            ;;
+        d)
+            DBNAME="$OPTARG"
+            ;;
+        h)
+            DBHOST="$OPTARG"
+            ;;
+        q)
+            export QUIET_MODE="-q"
+            ;;
+        -)
+            [ "${OPTARG}" == "help" ] && usage && exit 0
+            [ "${OPTARG}" == "quiet" ] && export QUIET_MODE="-q" && continue
+            [ "${OPTARG}" == "createdb" ] && export CREATEDB=yes && continue
+            echo "Invalid option: '--$OPTARG'. Type --help for more information" >&2
+            exit 1
+            ;;
+        \?)
+            echo "Invalid option: '-$OPTARG'. Type --help for more information" >&2
+            exit 1
+            ;;
+        :)
+            echo "Option '-$OPTARG' requires an argument. Type --help for more information" >&2
+            exit 1
+            ;;
+        *)
+            usage >&2
+            exit 1
+            ;;
+    esac
+done
+shift $((OPTIND-1))
+
+DB_VERSION=$1
+
+if [ -n "$DB_VERSION" ] ; then
+    # check it is a number and an allowed one
+    [ "$DB_VERSION" -eq "$DB_VERSION" ] 2>/dev/null || 
+        ! echo "parameter 'version' requires a integer value" >&2 || exit 1
+fi
+
+# Creating temporary file
+TEMPFILE="$(mktemp -q --tmpdir "initdb.XXXXXX")"
+trap 'rm -f "$TEMPFILE"' EXIT
+chmod 0600 "$TEMPFILE"
+DEF_EXTRA_FILE_PARAM="--defaults-extra-file=$TEMPFILE"
+echo -e "[client]\n user='${DBUSER}'\n password='$DBPASS'\n host='$DBHOST'\n port='$DBPORT'" > "$TEMPFILE"
+
+if [ -n "${CREATEDB}" ] ; then
+    FIRST_TRY="yes"
+    while ! DB_ERROR=`mysqladmin "$DEF_EXTRA_FILE_PARAM" -s status 2>&1 >/dev/null` ; do
+        # if password is not provided, try silently with $DEFAULT_DBPASS before exit or prompt for credentials
+        [[ -n "$FIRST_TRY" ]] && [[ -z "$DBPASS" ]] && DBPASS="$DEFAULT_DBPASS" &&
+            echo -e "[client]\n user='${DBUSER}'\n password='$DBPASS'\n host='$DBHOST'\n port='$DBPORT'" > "$TEMPFILE" &&
+            continue
+        echo "$DB_ERROR"
+        [[ -n "$QUIET_MODE" ]] && echo -e "Invalid admin database credentials!!!" >&2 && exit 1
+        echo -e "Provide database credentials (Ctrl+c to abort):"
+        read -e -p "    mysql user($DBUSER): " KK
+        [ -n "$KK" ] && DBUSER="$KK"
+        read -e -s -p "    mysql password: " DBPASS
+        echo -e "[client]\n user='${DBUSER}'\n password='$DBPASS'\n host='$DBHOST'\n port='$DBPORT'" > "$TEMPFILE"
+        FIRST_TRY=""
+        echo
+    done
+    # echo "    deleting previous database ${DBNAME} if it exists"
+    mysqladmin $DEF_EXTRA_FILE_PARAM DROP "${DBNAME}" -f && echo "Previous database deleted"
+    echo "    creating database ${DBNAME}"
+    mysqladmin $DEF_EXTRA_FILE_PARAM create "${DBNAME}" || exit 1
+fi
+
+# Check and ask for database user password
+FIRST_TRY="yes"
+while ! DB_ERROR=`mysql "$DEF_EXTRA_FILE_PARAM" $DBNAME -e "quit" 2>&1 >/dev/null`
+do
+    # if password is not provided, try silently with $DEFAULT_DBPASS before exit or prompt for credentials
+    [[ -n "$FIRST_TRY" ]] && [[ -z "$DBPASS" ]] && DBPASS="$DEFAULT_DBPASS" &&
+        echo -e "[client]\n user='${DBUSER}'\n password='$DBPASS'\n host='$DBHOST'\n port='$DBPORT'" > "$TEMPFILE" &&
+        continue
+    echo "$DB_ERROR"
+    [[ -n "$QUIET_MODE" ]] && echo -e "Invalid database credentials!!!" >&2 && exit 1
+    echo -e "Provide database name and credentials (Ctrl+c to abort):"
+    read -e -p "    mysql database name($DBNAME): " KK
+    [ -n "$KK" ] && DBNAME="$KK"
+    read -e -p "    mysql user($DBUSER): " KK
+    [ -n "$KK" ] && DBUSER="$KK"
+    read -e -s -p "    mysql password: " DBPASS
+    echo -e "[client]\n user='${DBUSER}'\n password='$DBPASS'\n host='$DBHOST'\n port='$DBPORT'" > "$TEMPFILE"
+    FIRST_TRY=""
+    echo
+done
+
+DBCMD="mysql $DEF_EXTRA_FILE_PARAM $DBNAME"
+DBUSER_="" && [ -n "$DBUSER" ] && DBUSER_="-u$DBUSER"
+DBPASS_="" && [ -n "$DBPASS" ] && DBPASS_="-p$DBPASS"
+DBHOST_="" && [ -n "$DBHOST" ] && DBHOST_="-h$DBHOST"
+DBPORT_="-P$DBPORT"
+
+echo "    loading ${DIRNAME}/mano_db_structure.sql"
+sed -e "s/{{mano_db}}/$DBNAME/" ${DIRNAME}/mano_db_structure.sql | mysql $DEF_EXTRA_FILE_PARAM ||
+    ! echo "ERROR at init $DBNAME" || exit 1
+
+echo "    migrate database version"
+# echo "${DIRNAME}/migrate_mano_db.sh $DBHOST_ $DBPORT_ $DBUSER_ $DBPASS_ -d$DBNAME $QUIET_MODE $DB_VERSION"
+${DIRNAME}/migrate_mano_db.sh $DBHOST_ $DBPORT_ $DBUSER_ $DBPASS_ -d$DBNAME $QUIET_MODE $DB_VERSION
+
diff --git a/RO/osm_ro/database_utils/install-db-server.sh b/RO/osm_ro/database_utils/install-db-server.sh
new file mode 100755 (executable)
index 0000000..36b8003
--- /dev/null
@@ -0,0 +1,296 @@
+#!/usr/bin/env bash
+
+DB_NAME='mano_db'
+DB_ADMIN_USER="root"
+DB_USER="mano"
+DB_PASS="manopw"
+DB_ADMIN_PASSWD=""
+DB_PORT="3306"
+DB_HOST=""
+DB_HOST_PARAM=""
+QUIET_MODE=""
+FORCEDB=""
+UPDATEDB=""
+NO_PACKAGES=""
+UNINSTALL=""
+
+
+function usage(){
+    echo -e "usage: sudo $0 [OPTIONS]"
+    echo -e "Install openmano database server and the needed packages"
+    echo -e "  OPTIONS"
+    echo -e "     -U USER:    database admin user. '$DB_ADMIN_USER' by default. Prompts if needed"
+    echo -e "     -P PASS:    database admin password to be used or installed. Prompts if needed"
+    echo -e "     -d: database name, '$DB_NAME' by default"
+    echo -e "     -u: database user, '$DB_USER' by default"
+    echo -e "     -p: database pass, '$DB_PASS' by default"
+    echo -e "     -H: HOST  database host. 'localhost' by default"
+    echo -e "     -T: PORT  database port. '$DB_PORT' by default"
+    echo -e "     -q --quiet: install in unattended mode"
+    echo -e "     -h --help:  show this help"
+    echo -e "     --forcedb:  if database exists, it is dropped and a new one is created"
+    echo -e "     --updatedb: if database exists, it preserves the content and it is updated to the needed version"
+    echo -e "     --no-install-packages: use this option to skip updating and installing the requires packages. This avoid wasting time if you are sure requires packages are present e.g. because of a previous installation"
+    echo -e "     --unistall: delete database"
+}
+
+function ask_user(){
+    # Prompt the user with $1 and parse a yes/no answer, case insensitive.
+    # Params: $1 prompt text; $2 default for empty input: 'y' means yes, 'n' means no, anything else re-prompts
+    # Return: 0 (true) if the user answers yes; 1 (false) if the user answers no
+    read -e -p "$1" USER_CONFIRMATION
+    while true ; do
+        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
+        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
+        [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
+        [ "${USER_CONFIRMATION,,}" == "no" ]  || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
+        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
+    done
+}
+
+function install_packages(){
+    [ -x /usr/bin/apt-get ] && apt-get install -y $*
+    [ -x /usr/bin/yum ]     && yum install     -y $*   
+    
+    # Verify each requested package really got installed; abort on the first failure
+    for PACKAGE in $*
+    do
+        PACKAGE_INSTALLED="no"
+        [ -x /usr/bin/apt-get ] && dpkg -l $PACKAGE            &>> /dev/null && PACKAGE_INSTALLED="yes"
+        [ -x /usr/bin/yum ]     && yum list installed $PACKAGE &>> /dev/null && PACKAGE_INSTALLED="yes" 
+        if [ "$PACKAGE_INSTALLED" = "no" ]
+        then
+            echo "failed to install package '$PACKAGE'. Revise network connectivity and try again" >&2
+            exit 1
+       fi
+    done
+}
+
+function _install_mysql_package(){
+    echo '
+    #################################################################
+    #####               INSTALL REQUIRED PACKAGES               #####
+    #################################################################'
+    [ "$_DISTRO" == "Ubuntu" ] && ! install_packages "mysql-server" && exit 1
+    [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && ! install_packages "mariadb mariadb-server" && exit 1
+
+    if [[ "$_DISTRO" == "Ubuntu" ]]
+    then
+        #start services. By default CentOS does not start services
+        service mysql start >> /dev/null
+        # try to set admin password, ignore if fails
+        [[ -n $DBPASSWD ]] && mysqladmin -u $DB_ADMIN_USER -s password $DB_ADMIN_PASSWD
+    fi
+
+    if [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ]
+    then
+        #start services. By default CentOS does not start services
+        service mariadb start
+        service httpd   start
+        systemctl enable mariadb
+        systemctl enable httpd
+        ask_user "Do you want to configure mariadb (recommended if not done before) (Y/n)? " y &&
+            mysql_secure_installation
+
+        ask_user "Do you want to set firewall to grant web access port 80,443  (Y/n)? " y &&
+            firewall-cmd --permanent --zone=public --add-service=http &&
+            firewall-cmd --permanent --zone=public --add-service=https &&
+            firewall-cmd --reload
+    fi
+}
+
+function _create_db(){
+    echo '
+    #################################################################
+    #####        CREATE AND INIT DATABASE                       #####
+    #################################################################'
+    echo "mysqladmin --defaults-extra-file="$TEMPFILE" -s create ${DB_NAME}"
+    mysqladmin --defaults-extra-file="$TEMPFILE" -s create ${DB_NAME} \
+        || ! echo "Error creating ${DB_NAME} database" >&2 \
+        || exit 1
+    echo "CREATE USER $DB_USER@'localhost' IDENTIFIED BY '$DB_PASS';"   | mysql --defaults-extra-file="$TEMPFILE" -s 2>/dev/null \
+        || echo "Warning: User '$DB_USER' cannot be created at database. Probably exist" >&2
+    echo "GRANT ALL PRIVILEGES ON ${DB_NAME}.* TO '$DB_USER'@'localhost';" | mysql --defaults-extra-file="$TEMPFILE" -s \
+        || ! echo "Error: Granting privileges to user '$DB_USER' at database" >&2 \
+        || exit 1
+    echo " Database '${DB_NAME}' created, user '$DB_USER' password '$DB_PASS'"
+    DIRNAME=$(dirname $(readlink -f ${BASH_SOURCE[0]}))
+    ${DIRNAME}/init_mano_db.sh -u"$DB_USER" -p"$DB_PASS" -d"$DB_NAME" -P"$DB_PORT" $DB_HOST_PARAM \
+        || ! echo "Error initializing database '$DB_NAME'" >&2 \
+        || exit 1
+}
+
+function _delete_db(){
+   mysqladmin --defaults-extra-file="$TEMPFILE" -s drop "${DB_NAME}" $DBDELETEPARAM \
+       || ! echo "Error: Could not delete '${DB_NAME}' database" >&2 \
+       || exit 1
+}
+
+function _update_db(){
+    echo '
+    #################################################################
+    #####        UPDATE DATABASE                                #####
+    #################################################################'
+    echo "CREATE USER $DB_USER@'localhost' IDENTIFIED BY '$DB_PASS';" | mysql --defaults-extra-file="$TEMPFILE" -s 2>/dev/null \
+        || echo "Warning: User '$DB_USER' cannot be created at database. Probably exist" >&2
+    echo "GRANT ALL PRIVILEGES ON ${DB_NAME}.* TO '$DB_USER'@'localhost';" | mysql --defaults-extra-file="$TEMPFILE" -s \
+        || ! echo "Error: Granting privileges to user '$DB_USER' at database" >&2 \
+        || exit 1
+    echo " Granted privileges to user '$DB_USER' password '$DB_PASS' to existing database '${DB_NAME}'"
+    DIRNAME=$(dirname $(readlink -f ${BASH_SOURCE[0]}))
+    ${DIRNAME}/migrate_mano_db.sh -u"$DB_USER" -p"$DB_PASS" -d"$DB_NAME" -P"$DB_PORT" $DB_HOST_PARAM \
+        || ! echo "Error updating database '$DB_NAME'" >&2 \
+        || exit 1
+}
+
+function _uninstall_db(){
+echo '
+    #################################################################
+    #####        DELETE DATABASE                                #####
+    #################################################################'
+    DBDELETEPARAM=""
+    [[ -n $QUIET_MODE ]] && DBDELETEPARAM="-f"
+    _delete_db
+}
+
+function db_exists(){  # (db_name, credential_file)
+    # Abort the whole script if the credentials in $2 do not even allow listing databases
+    mysqlshow --defaults-extra-file="$2" >/dev/null  || exit 1
+    if mysqlshow --defaults-extra-file="$2" | grep -v Wildcard | grep -w -q $1
+    then
+        # echo " DB $1 exists"
+        return 0
+    fi
+    # echo " DB $1 does not exist"
+    return 1
+}
+
+while getopts ":U:P:d:u:p:H:T:hiq-:" o; do
+    case "${o}" in
+        U)
+            export DB_ADMIN_USER="$OPTARG"
+            ;;
+        P)
+            export DB_ADMIN_PASSWD="$OPTARG"
+            ;;
+        d)
+            export DB_NAME="$OPTARG"
+            ;;
+        u)
+            export DB_USER="$OPTARG"
+            ;;
+        p)
+            export DB_PASS="$OPTARG"
+            ;;
+        H)
+            export DB_HOST="$OPTARG"
+            export DB_HOST_PARAM="-h$DB_HOST"
+            ;;
+        T)
+            export DB_PORT="$OPTARG"
+            ;;
+        q)
+            export QUIET_MODE=yes
+            export DEBIAN_FRONTEND=noninteractive
+            ;;
+        h)
+            usage && exit 0
+            ;;
+        -)
+            [ "${OPTARG}" == "help" ] && usage && exit 0
+            [ "${OPTARG}" == "forcedb" ] && FORCEDB="y" && continue
+            [ "${OPTARG}" == "updatedb" ] && UPDATEDB="y" && continue
+            [ "${OPTARG}" == "quiet" ] && export QUIET_MODE=yes && export DEBIAN_FRONTEND=noninteractive && continue
+            [ "${OPTARG}" == "no-install-packages" ] && export NO_PACKAGES=yes && continue
+            [ "${OPTARG}" == "uninstall" ] &&  UNINSTALL="y" && continue
+            echo -e "Invalid option: '--$OPTARG'\nTry $0 --help for more information" >&2
+            exit 1
+            ;;
+        \?)
+            echo -e "Invalid option: '-$OPTARG'\nTry $0 --help for more information" >&2
+            exit 1
+            ;;
+        :)
+            echo -e "Option '-$OPTARG' requires an argument\nTry $0 --help for more information" >&2
+            exit 1
+            ;;
+        *)
+            usage >&2
+            exit 1
+            ;;
+    esac
+done
+if [ -n "$FORCEDB" ] && [ -n "$UPDATEDB" ] ; then
+    echo "Error: options --forcedb and --updatedb are mutually exclusive" >&2
+    exit 1
+fi
+
+# Discover Linux distribution
+# try redhat type
+[ -f /etc/redhat-release ] && _DISTRO=$(cat /etc/redhat-release 2>/dev/null | cut  -d" " -f1)
+# if not assuming ubuntu type
+[ -f /etc/redhat-release ] || _DISTRO=$(lsb_release -is  2>/dev/null)
+
+if [[ -z "$NO_PACKAGES" ]]
+then
+    [ "$USER" != "root" ] && echo "Needed root privileges" >&2 && exit 1
+    _install_mysql_package || exit 1
+fi
+
+# Creating temporary file for MYSQL installation and initialization"
+TEMPFILE="$(mktemp -q --tmpdir "installdb.XXXXXX")"
+trap 'rm -f "$TEMPFILE"' EXIT
+chmod 0600 "$TEMPFILE"
+echo -e "[client]\n user='${DB_ADMIN_USER}'\n password='$DB_ADMIN_PASSWD'\n host='$DB_HOST'\n port='$DB_PORT'" > "$TEMPFILE"
+
+#check and ask for database user password. Must be done after database installation
+if [[ -z $QUIET_MODE ]]
+then
+    echo -e "\nChecking database connection and asking for credentials"
+    # echo "mysqladmin --defaults-extra-file=$TEMPFILE -s status >/dev/null"
+    while ! mysqladmin --defaults-extra-file="$TEMPFILE" -s status >/dev/null
+    do
+        [ -n "$logintry" ] &&  echo -e "\nInvalid database credentials!!!. Try again (Ctrl+c to abort)"
+        [ -z "$logintry" ] &&  echo -e "\nProvide database credentials"
+        read -e -p "database admin user? ($DB_ADMIN_USER) " DBUSER_
+        [ -n "$DBUSER_" ] && DB_ADMIN_USER=$DBUSER_
+        read -e -s -p "database admin password? (Enter for not using password) " DBPASSWD_
+        [ -n "$DBPASSWD_" ] && DB_ADMIN_PASSWD="$DBPASSWD_"
+        [ -z "$DBPASSWD_" ] && DB_ADMIN_PASSWD=""
+        echo -e "[client]\n user='${DB_ADMIN_USER}'\n password='$DB_ADMIN_PASSWD'\n host='$DB_HOST'\n port='$DB_PORT'" > "$TEMPFILE"
+        logintry="yes"
+    done
+fi
+
+if [[ ! -z "$UNINSTALL" ]]
+then
+    _uninstall_db
+    exit
+fi
+
+# Create or update database
+if db_exists $DB_NAME $TEMPFILE ; then
+    if [[ -n $FORCEDB ]] ; then
+        # DBDELETEPARAM=""
+        # [[ -n $QUIET_MODE ]] && DBDELETEPARAM="-f"
+        DBDELETEPARAM="-f"
+        _delete_db
+        _create_db
+    elif [[ -n $UPDATEDB ]] ; then
+        _update_db
+    elif [[ -z $QUIET_MODE ]] ; then
+        echo "database '$DB_NAME' exist. Reinstall it?"
+        if ask_user "Type 'y' to drop and reinstall existing database (content will be lost), Type 'n' to update existing database (y/N)? " n ; then
+            _delete_db
+            _create_db
+        else
+            _update_db
+        fi
+    else
+        echo "Database '$DB_NAME' exists. Use option '--forcedb' to force the deletion of the existing one, or '--updatedb' to use existing one and update it"
+        exit 1
+    fi
+else
+    _create_db
+fi
+
diff --git a/RO/osm_ro/database_utils/mano_db_structure.sql b/RO/osm_ro/database_utils/mano_db_structure.sql
new file mode 100644 (file)
index 0000000..9e2d911
--- /dev/null
@@ -0,0 +1,1240 @@
+/**
+* Copyright 2017 Telefonica Investigacion y Desarrollo, S.A.U.
+* This file is part of openmano
+* All Rights Reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License"); you may
+* not use this file except in compliance with the License. You may obtain
+* a copy of the License at
+*
+*         http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+* License for the specific language governing permissions and limitations
+* under the License.
+*
+* For those usages not covered by the Apache License, Version 2.0 please
+* contact with: nfvlabs@tid.es
+**/
+
+-- MySQL dump 10.13  Distrib 5.7.24, for Linux (x86_64)
+--
+-- Host: localhost    Database: {{mano_db}}
+-- ------------------------------------------------------
+-- Server version      5.7.24
+
+/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
+/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
+/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
+/*!40101 SET NAMES utf8 */;
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+
+--
+-- Current Database: `{{mano_db}}`
+--
+
+/*!40000 DROP DATABASE IF EXISTS `{{mano_db}}`*/;
+
+CREATE DATABASE /*!32312 IF NOT EXISTS*/ `{{mano_db}}` /*!40100 DEFAULT CHARACTER SET utf8 */;
+
+USE `{{mano_db}}`;
+
+--
+-- Table structure for table `datacenter_nets`
+--
+
+DROP TABLE IF EXISTS `datacenter_nets`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `datacenter_nets` (
+  `uuid` varchar(36) NOT NULL,
+  `name` varchar(255) NOT NULL,
+  `vim_net_id` varchar(36) NOT NULL,
+  `datacenter_id` varchar(36) NOT NULL,
+  `type` enum('bridge','data','ptp') NOT NULL DEFAULT 'data' COMMENT 'Type of network',
+  `multipoint` enum('true','false') NOT NULL DEFAULT 'true',
+  `shared` enum('true','false') NOT NULL DEFAULT 'false' COMMENT 'If can be shared with serveral scenarios',
+  `description` varchar(255) DEFAULT NULL,
+  `created_at` double NOT NULL,
+  `modified_at` double DEFAULT NULL,
+  PRIMARY KEY (`uuid`),
+  UNIQUE KEY `name_datacenter_id` (`name`,`datacenter_id`),
+  KEY `FK_datacenter_nets_datacenters` (`datacenter_id`),
+  CONSTRAINT `FK_datacenter_nets_datacenters` FOREIGN KEY (`datacenter_id`) REFERENCES `datacenters` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Contain the external nets of a datacenter';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `datacenter_tenants`
+--
+
+DROP TABLE IF EXISTS `datacenter_tenants`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `datacenter_tenants` (
+  `uuid` varchar(36) NOT NULL,
+  `name` varchar(255) DEFAULT NULL,
+  `datacenter_id` varchar(36) NOT NULL COMMENT 'Datacenter of this tenant',
+  `vim_tenant_name` varchar(256) DEFAULT NULL,
+  `vim_tenant_id` varchar(256) DEFAULT NULL COMMENT 'Tenant ID at VIM',
+  `created` enum('true','false') NOT NULL DEFAULT 'false' COMMENT 'Indicates if this tenant has been created by openmano, or it existed on VIM',
+  `user` varchar(64) DEFAULT NULL,
+  `passwd` varchar(64) DEFAULT NULL,
+  `config` varchar(4000) DEFAULT NULL,
+  `created_at` double NOT NULL,
+  `modified_at` double DEFAULT NULL,
+  PRIMARY KEY (`uuid`),
+  KEY `FK_vim_tenants_datacenters` (`datacenter_id`),
+  CONSTRAINT `FK_vim_tenants_datacenters` FOREIGN KEY (`datacenter_id`) REFERENCES `datacenters` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Scenarios defined by the user';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `datacenters`
+--
+
+DROP TABLE IF EXISTS `datacenters`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `datacenters` (
+  `uuid` varchar(36) NOT NULL,
+  `name` varchar(255) NOT NULL,
+  `description` varchar(255) DEFAULT NULL,
+  `type` varchar(36) NOT NULL DEFAULT 'openvim',
+  `vim_url` varchar(150) NOT NULL COMMENT 'URL of the VIM for the REST API',
+  `vim_url_admin` varchar(150) DEFAULT NULL,
+  `config` varchar(4000) DEFAULT NULL COMMENT 'extra config information in json',
+  `created_at` double NOT NULL,
+  `modified_at` double DEFAULT NULL,
+  PRIMARY KEY (`uuid`),
+  UNIQUE KEY `name` (`name`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Datacenters managed by the NFVO.';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `datacenters_flavors`
+--
+
+DROP TABLE IF EXISTS `datacenters_flavors`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `datacenters_flavors` (
+  `id` int(11) NOT NULL AUTO_INCREMENT,
+  `flavor_id` varchar(36) NOT NULL,
+  `datacenter_vim_id` varchar(36) NOT NULL,
+  `vim_id` varchar(36) NOT NULL,
+  `status` enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+  `vim_info` text,
+  `created` enum('true','false') NOT NULL DEFAULT 'false' COMMENT 'Indicates if it has been created by openmano, or already existed',
+  `extended` varchar(2000) DEFAULT NULL COMMENT 'Extra description json format of additional devices',
+  PRIMARY KEY (`id`),
+  KEY `FK__flavors` (`flavor_id`),
+  KEY `FK_datacenters_flavors_datacenter_tenants` (`datacenter_vim_id`),
+  CONSTRAINT `FK__flavors` FOREIGN KEY (`flavor_id`) REFERENCES `flavors` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+  CONSTRAINT `FK_datacenters_flavors_datacenter_tenants` FOREIGN KEY (`datacenter_vim_id`) REFERENCES `datacenter_tenants` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB AUTO_INCREMENT=7 DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `datacenters_images`
+--
+
+DROP TABLE IF EXISTS `datacenters_images`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `datacenters_images` (
+  `id` int(11) NOT NULL AUTO_INCREMENT,
+  `image_id` varchar(36) NOT NULL,
+  `datacenter_vim_id` varchar(36) NOT NULL,
+  `vim_id` varchar(36) NOT NULL,
+  `status` enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+  `vim_info` text,
+  `created` enum('true','false') NOT NULL DEFAULT 'false' COMMENT 'Indicates if it has been created by openmano, or already existed',
+  PRIMARY KEY (`id`),
+  KEY `FK__images` (`image_id`),
+  KEY `FK_datacenters_images_datacenter_tenants` (`datacenter_vim_id`),
+  CONSTRAINT `FK__images` FOREIGN KEY (`image_id`) REFERENCES `images` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+  CONSTRAINT `FK_datacenters_images_datacenter_tenants` FOREIGN KEY (`datacenter_vim_id`) REFERENCES `datacenter_tenants` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB AUTO_INCREMENT=6 DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `flavors`
+--
+
+DROP TABLE IF EXISTS `flavors`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `flavors` (
+  `uuid` varchar(36) NOT NULL,
+  `name` varchar(255) NOT NULL,
+  `description` varchar(255) DEFAULT NULL,
+  `disk` smallint(5) unsigned DEFAULT NULL,
+  `ram` mediumint(7) unsigned DEFAULT NULL,
+  `vcpus` smallint(5) unsigned DEFAULT NULL,
+  `extended` varchar(2000) DEFAULT NULL COMMENT 'Extra description json format of needed resources and pining, orginized in sets per numa',
+  PRIMARY KEY (`uuid`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `images`
+--
+
+DROP TABLE IF EXISTS `images`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `images` (
+  `uuid` varchar(36) NOT NULL,
+  `name` varchar(255) NOT NULL,
+  `universal_name` varchar(255) DEFAULT NULL,
+  `checksum` varchar(32) DEFAULT NULL,
+  `location` varchar(200) DEFAULT NULL,
+  `description` varchar(255) DEFAULT NULL,
+  `metadata` varchar(2000) DEFAULT NULL,
+  PRIMARY KEY (`uuid`),
+  UNIQUE KEY `location` (`location`),
+  UNIQUE KEY `universal_name_checksum` (`universal_name`,`checksum`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `instance_actions`
+--
+
+DROP TABLE IF EXISTS `instance_actions`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `instance_actions` (
+  `uuid` varchar(36) NOT NULL,
+  `tenant_id` varchar(36) DEFAULT NULL,
+  `instance_id` varchar(36) DEFAULT NULL,
+  `description` varchar(64) DEFAULT NULL COMMENT 'CREATE, DELETE, SCALE OUT/IN, ...',
+  `number_tasks` smallint(6) NOT NULL DEFAULT '1',
+  `number_done` smallint(6) NOT NULL DEFAULT '0',
+  `number_failed` smallint(6) NOT NULL DEFAULT '0',
+  `created_at` double NOT NULL,
+  `modified_at` double DEFAULT NULL,
+  PRIMARY KEY (`uuid`),
+  KEY `FK_actions_tenants` (`tenant_id`),
+  CONSTRAINT `FK_actions_tenant` FOREIGN KEY (`tenant_id`) REFERENCES `nfvo_tenants` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Contains client actions over instances';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `instance_classifications`
+--
+
+DROP TABLE IF EXISTS `instance_classifications`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `instance_classifications` (
+  `uuid` varchar(36) NOT NULL,
+  `instance_scenario_id` varchar(36) NOT NULL,
+  `vim_classification_id` varchar(36) DEFAULT NULL,
+  `sce_classifier_match_id` varchar(36) DEFAULT NULL,
+  `datacenter_id` varchar(36) DEFAULT NULL,
+  `datacenter_tenant_id` varchar(36) DEFAULT NULL,
+  `status` enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+  `error_msg` varchar(1024) DEFAULT NULL,
+  `vim_info` text,
+  `created_at` double NOT NULL,
+  `modified_at` double DEFAULT NULL,
+  PRIMARY KEY (`uuid`),
+  KEY `FK_instance_classifications_instance_scenarios` (`instance_scenario_id`),
+  KEY `FK_instance_classifications_sce_classifier_matches` (`sce_classifier_match_id`),
+  KEY `FK_instance_classifications_datacenters` (`datacenter_id`),
+  KEY `FK_instance_classifications_datacenter_tenants` (`datacenter_tenant_id`),
+  CONSTRAINT `FK_instance_classifications_datacenter_tenants` FOREIGN KEY (`datacenter_tenant_id`) REFERENCES `datacenter_tenants` (`uuid`),
+  CONSTRAINT `FK_instance_classifications_datacenters` FOREIGN KEY (`datacenter_id`) REFERENCES `datacenters` (`uuid`),
+  CONSTRAINT `FK_instance_classifications_instance_scenarios` FOREIGN KEY (`instance_scenario_id`) REFERENCES `instance_scenarios` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+  CONSTRAINT `FK_instance_classifications_sce_classifier_matches` FOREIGN KEY (`sce_classifier_match_id`) REFERENCES `sce_classifier_matches` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `instance_interfaces`
+--
+-- NOTE(review): attachment points of deployed VMs: each row ties one
+-- instance_vm to one instance_net, recording both the VIM-side port id
+-- (vim_interface_id) and, when SDN-assisted, the ovim port mapping
+-- (sdn_port_id / compute_node / pci / vlan) per the column comments.
+-- interface_id is nullable and SET NULL on delete, so rows survive
+-- removal of the originating template interface.
+
+DROP TABLE IF EXISTS `instance_interfaces`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `instance_interfaces` (
+  `uuid` varchar(36) NOT NULL,
+  `instance_vm_id` varchar(36) NOT NULL,
+  `instance_net_id` varchar(36) NOT NULL,
+  `interface_id` varchar(36) DEFAULT NULL,
+  `vim_interface_id` varchar(128) DEFAULT NULL,
+  `mac_address` varchar(32) DEFAULT NULL,
+  `ip_address` varchar(64) DEFAULT NULL,
+  `vim_info` text,
+  `type` enum('internal','external') NOT NULL COMMENT 'Indicates if this interface is external to a vnf, or internal',
+  `floating_ip` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'Indicates if a floating_ip must be associated to this interface',
+  `port_security` tinyint(1) NOT NULL DEFAULT '1' COMMENT 'Indicates if port security must be enabled or disabled. By default it is enabled',
+  `sdn_port_id` varchar(36) DEFAULT NULL COMMENT 'Port id in ovim',
+  `compute_node` varchar(100) DEFAULT NULL COMMENT 'Compute node id used to specify the SDN port mapping',
+  `pci` varchar(50) DEFAULT NULL COMMENT 'PCI of the  physical port in the host',
+  `vlan` smallint(5) unsigned DEFAULT NULL COMMENT 'VLAN tag used by the port',
+  PRIMARY KEY (`uuid`),
+  KEY `FK_instance_vms` (`instance_vm_id`),
+  KEY `FK_instance_nets` (`instance_net_id`),
+  KEY `FK_instance_ids` (`interface_id`),
+  CONSTRAINT `FK_instance_ids` FOREIGN KEY (`interface_id`) REFERENCES `interfaces` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE,
+  CONSTRAINT `FK_instance_nets` FOREIGN KEY (`instance_net_id`) REFERENCES `instance_nets` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+  CONSTRAINT `FK_instance_vms` FOREIGN KEY (`instance_vm_id`) REFERENCES `instance_vms` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Table with all running associattion among VM instances and net instances';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `instance_nets`
+--
+-- NOTE(review): networks materialised at a VIM for a running scenario.
+-- `created` distinguishes nets built by RO from pre-existing VIM nets;
+-- sdn_net_id carries the ovim network id when SDN is in use. The
+-- datacenter_tenant FK has no ON DELETE action, so datacenter tenants
+-- with instantiated nets cannot be removed.
+
+DROP TABLE IF EXISTS `instance_nets`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `instance_nets` (
+  `uuid` varchar(36) NOT NULL,
+  `vim_net_id` varchar(128) DEFAULT NULL,
+  `vim_name` varchar(255) DEFAULT NULL,
+  `instance_scenario_id` varchar(36) DEFAULT NULL,
+  `sce_net_id` varchar(36) DEFAULT NULL,
+  `net_id` varchar(36) DEFAULT NULL,
+  `datacenter_id` varchar(36) DEFAULT NULL,
+  `datacenter_tenant_id` varchar(36) NOT NULL,
+  `status` enum('ACTIVE','INACTIVE','DOWN','BUILD','ERROR','VIM_ERROR','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+  `error_msg` varchar(1024) DEFAULT NULL,
+  `vim_info` text,
+  `multipoint` enum('true','false') NOT NULL DEFAULT 'true',
+  `created` enum('true','false') NOT NULL DEFAULT 'false' COMMENT 'Created or already exists at VIM',
+  `created_at` double NOT NULL,
+  `modified_at` double DEFAULT NULL,
+  `sdn_net_id` varchar(36) DEFAULT NULL COMMENT 'Network id in ovim',
+  PRIMARY KEY (`uuid`),
+  KEY `FK_instance_nets_instance_scenarios` (`instance_scenario_id`),
+  KEY `FK_instance_nets_sce_nets` (`sce_net_id`),
+  KEY `FK_instance_nets_nets` (`net_id`),
+  KEY `FK_instance_nets_datacenters` (`datacenter_id`),
+  KEY `FK_instance_nets_datacenter_tenants` (`datacenter_tenant_id`),
+  CONSTRAINT `FK_instance_nets_datacenter_tenants` FOREIGN KEY (`datacenter_tenant_id`) REFERENCES `datacenter_tenants` (`uuid`),
+  CONSTRAINT `FK_instance_nets_datacenters` FOREIGN KEY (`datacenter_id`) REFERENCES `datacenters` (`uuid`),
+  CONSTRAINT `FK_instance_nets_instance_scenarios` FOREIGN KEY (`instance_scenario_id`) REFERENCES `instance_scenarios` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+  CONSTRAINT `FK_instance_nets_nets` FOREIGN KEY (`net_id`) REFERENCES `nets` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE,
+  CONSTRAINT `FK_instance_nets_sce_nets` FOREIGN KEY (`sce_net_id`) REFERENCES `sce_nets` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Instances of networks';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `instance_scenarios`
+--
+-- NOTE(review): root record of one deployed scenario (NS instance); all
+-- instance_* tables hang from it via ON DELETE CASCADE. scenario_id is
+-- SET NULL on template deletion, so running instances outlive their
+-- template. Index name `FK_scenarios_nfvo_tenants` does not match its
+-- constraint `FK_instance_scenarios_nfvo_tenants` — harmless, but
+-- inconsistent with the naming used elsewhere in this schema.
+
+DROP TABLE IF EXISTS `instance_scenarios`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `instance_scenarios` (
+  `uuid` varchar(36) NOT NULL,
+  `name` varchar(255) NOT NULL,
+  `tenant_id` varchar(36) DEFAULT NULL,
+  `scenario_id` varchar(36) DEFAULT NULL,
+  `datacenter_id` varchar(36) NOT NULL,
+  `datacenter_tenant_id` varchar(36) NOT NULL,
+  `description` varchar(255) DEFAULT NULL,
+  `created_at` double NOT NULL,
+  `modified_at` double DEFAULT NULL,
+  `cloud_config` mediumtext,
+  PRIMARY KEY (`uuid`),
+  KEY `FK_scenarios_nfvo_tenants` (`tenant_id`),
+  KEY `FK_instance_scenarios_vim_tenants` (`datacenter_tenant_id`),
+  KEY `FK_instance_scenarios_datacenters` (`datacenter_id`),
+  KEY `FK_instance_scenarios_scenarios` (`scenario_id`),
+  CONSTRAINT `FK_instance_scenarios_datacenter_tenants` FOREIGN KEY (`datacenter_tenant_id`) REFERENCES `datacenter_tenants` (`uuid`),
+  CONSTRAINT `FK_instance_scenarios_datacenters` FOREIGN KEY (`datacenter_id`) REFERENCES `datacenters` (`uuid`),
+  CONSTRAINT `FK_instance_scenarios_nfvo_tenants` FOREIGN KEY (`tenant_id`) REFERENCES `nfvo_tenants` (`uuid`),
+  CONSTRAINT `FK_instance_scenarios_scenarios` FOREIGN KEY (`scenario_id`) REFERENCES `scenarios` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Instances of scenarios defined by the user';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `instance_sfis`
+--
+-- NOTE(review): the three SFC instance tables below (instance_sfis,
+-- instance_sfps, instance_sfs) are deliberately parallel: same columns,
+-- same status enum, same FK pattern (CASCADE from instance_scenarios,
+-- SET NULL from the sce_* template row, plain FK to datacenter tables).
+-- They differ only in the VIM id column and the referenced template
+-- table (sce_rsp_hops vs sce_rsps).
+
+DROP TABLE IF EXISTS `instance_sfis`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `instance_sfis` (
+  `uuid` varchar(36) NOT NULL,
+  `instance_scenario_id` varchar(36) NOT NULL,
+  `vim_sfi_id` varchar(36) DEFAULT NULL,
+  `sce_rsp_hop_id` varchar(36) DEFAULT NULL,
+  `datacenter_id` varchar(36) DEFAULT NULL,
+  `datacenter_tenant_id` varchar(36) DEFAULT NULL,
+  `status` enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+  `error_msg` varchar(1024) DEFAULT NULL,
+  `vim_info` text,
+  `created_at` double NOT NULL,
+  `modified_at` double DEFAULT NULL,
+  PRIMARY KEY (`uuid`),
+  KEY `FK_instance_sfis_instance_scenarios` (`instance_scenario_id`),
+  KEY `FK_instance_sfis_sce_rsp_hops` (`sce_rsp_hop_id`),
+  KEY `FK_instance_sfis_datacenters` (`datacenter_id`),
+  KEY `FK_instance_sfis_datacenter_tenants` (`datacenter_tenant_id`),
+  CONSTRAINT `FK_instance_sfis_datacenter_tenants` FOREIGN KEY (`datacenter_tenant_id`) REFERENCES `datacenter_tenants` (`uuid`),
+  CONSTRAINT `FK_instance_sfis_datacenters` FOREIGN KEY (`datacenter_id`) REFERENCES `datacenters` (`uuid`),
+  CONSTRAINT `FK_instance_sfis_instance_scenarios` FOREIGN KEY (`instance_scenario_id`) REFERENCES `instance_scenarios` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+  CONSTRAINT `FK_instance_sfis_sce_rsp_hops` FOREIGN KEY (`sce_rsp_hop_id`) REFERENCES `sce_rsp_hops` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `instance_sfps`
+--
+
+DROP TABLE IF EXISTS `instance_sfps`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `instance_sfps` (
+  `uuid` varchar(36) NOT NULL,
+  `instance_scenario_id` varchar(36) NOT NULL,
+  `vim_sfp_id` varchar(36) DEFAULT NULL,
+  `sce_rsp_id` varchar(36) DEFAULT NULL,
+  `datacenter_id` varchar(36) DEFAULT NULL,
+  `datacenter_tenant_id` varchar(36) DEFAULT NULL,
+  `status` enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+  `error_msg` varchar(1024) DEFAULT NULL,
+  `vim_info` text,
+  `created_at` double NOT NULL,
+  `modified_at` double DEFAULT NULL,
+  PRIMARY KEY (`uuid`),
+  KEY `FK_instance_sfps_instance_scenarios` (`instance_scenario_id`),
+  KEY `FK_instance_sfps_sce_rsps` (`sce_rsp_id`),
+  KEY `FK_instance_sfps_datacenters` (`datacenter_id`),
+  KEY `FK_instance_sfps_datacenter_tenants` (`datacenter_tenant_id`),
+  CONSTRAINT `FK_instance_sfps_datacenter_tenants` FOREIGN KEY (`datacenter_tenant_id`) REFERENCES `datacenter_tenants` (`uuid`),
+  CONSTRAINT `FK_instance_sfps_datacenters` FOREIGN KEY (`datacenter_id`) REFERENCES `datacenters` (`uuid`),
+  CONSTRAINT `FK_instance_sfps_instance_scenarios` FOREIGN KEY (`instance_scenario_id`) REFERENCES `instance_scenarios` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+  CONSTRAINT `FK_instance_sfps_sce_rsps` FOREIGN KEY (`sce_rsp_id`) REFERENCES `sce_rsps` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `instance_sfs`
+--
+
+DROP TABLE IF EXISTS `instance_sfs`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `instance_sfs` (
+  `uuid` varchar(36) NOT NULL,
+  `instance_scenario_id` varchar(36) NOT NULL,
+  `vim_sf_id` varchar(36) DEFAULT NULL,
+  `sce_rsp_hop_id` varchar(36) DEFAULT NULL,
+  `datacenter_id` varchar(36) DEFAULT NULL,
+  `datacenter_tenant_id` varchar(36) DEFAULT NULL,
+  `status` enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+  `error_msg` varchar(1024) DEFAULT NULL,
+  `vim_info` text,
+  `created_at` double NOT NULL,
+  `modified_at` double DEFAULT NULL,
+  PRIMARY KEY (`uuid`),
+  KEY `FK_instance_sfs_instance_scenarios` (`instance_scenario_id`),
+  KEY `FK_instance_sfs_sce_rsp_hops` (`sce_rsp_hop_id`),
+  KEY `FK_instance_sfs_datacenters` (`datacenter_id`),
+  KEY `FK_instance_sfs_datacenter_tenants` (`datacenter_tenant_id`),
+  CONSTRAINT `FK_instance_sfs_datacenter_tenants` FOREIGN KEY (`datacenter_tenant_id`) REFERENCES `datacenter_tenants` (`uuid`),
+  CONSTRAINT `FK_instance_sfs_datacenters` FOREIGN KEY (`datacenter_id`) REFERENCES `datacenters` (`uuid`),
+  CONSTRAINT `FK_instance_sfs_instance_scenarios` FOREIGN KEY (`instance_scenario_id`) REFERENCES `instance_scenarios` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+  CONSTRAINT `FK_instance_sfs_sce_rsp_hops` FOREIGN KEY (`sce_rsp_hop_id`) REFERENCES `sce_rsp_hops` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `instance_vms`
+--
+-- NOTE(review): one row per VM deployed at a VIM. UNIQUE KEY on
+-- vim_vm_id is nullable — InnoDB permits multiple NULLs in a unique
+-- index, so VMs not yet created at the VIM coexist; uniqueness only
+-- kicks in once the VIM id is assigned.
+
+DROP TABLE IF EXISTS `instance_vms`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `instance_vms` (
+  `uuid` varchar(36) NOT NULL,
+  `instance_vnf_id` varchar(36) NOT NULL,
+  `vm_id` varchar(36) DEFAULT NULL,
+  `vim_vm_id` varchar(128) DEFAULT NULL,
+  `vim_name` varchar(255) DEFAULT NULL,
+  `status` enum('ACTIVE:NoMgmtIP','ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+  `error_msg` varchar(1024) DEFAULT NULL,
+  `vim_info` text,
+  `created_at` double NOT NULL,
+  `modified_at` double DEFAULT NULL,
+  PRIMARY KEY (`uuid`),
+  UNIQUE KEY `vim_vm_id` (`vim_vm_id`),
+  KEY `FK_instance_vms_vms` (`vm_id`),
+  KEY `FK_instance_vms_instance_vnfs` (`instance_vnf_id`),
+  CONSTRAINT `FK_instance_vms_instance_vnfs` FOREIGN KEY (`instance_vnf_id`) REFERENCES `instance_vnfs` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+  CONSTRAINT `FK_instance_vms_vms` FOREIGN KEY (`vm_id`) REFERENCES `vms` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Instances of VMs as part of VNF instances';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `instance_vnfs`
+--
+-- NOTE(review): vnf_id is NOT NULL with a plain FK (no ON DELETE
+-- action), so a VNF template cannot be deleted while instances of it
+-- exist — unlike sce_vnf_id, which is nullable and SET NULL.
+
+DROP TABLE IF EXISTS `instance_vnfs`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `instance_vnfs` (
+  `uuid` varchar(36) NOT NULL,
+  `instance_scenario_id` varchar(36) NOT NULL,
+  `vnf_id` varchar(36) NOT NULL,
+  `sce_vnf_id` varchar(36) DEFAULT NULL,
+  `datacenter_id` varchar(36) DEFAULT NULL,
+  `datacenter_tenant_id` varchar(36) DEFAULT NULL,
+  `created_at` double NOT NULL,
+  `modified_at` double DEFAULT NULL,
+  PRIMARY KEY (`uuid`),
+  KEY `FK_instance_vnfs_vnfs` (`vnf_id`),
+  KEY `FK_instance_vnfs_instance_scenarios` (`instance_scenario_id`),
+  KEY `FK_instance_vnfs_sce_vnfs` (`sce_vnf_id`),
+  KEY `FK_instance_vnfs_datacenters` (`datacenter_id`),
+  KEY `FK_instance_vnfs_datacenter_tenants` (`datacenter_tenant_id`),
+  CONSTRAINT `FK_instance_vnfs_datacenter_tenants` FOREIGN KEY (`datacenter_tenant_id`) REFERENCES `datacenter_tenants` (`uuid`),
+  CONSTRAINT `FK_instance_vnfs_datacenters` FOREIGN KEY (`datacenter_id`) REFERENCES `datacenters` (`uuid`),
+  CONSTRAINT `FK_instance_vnfs_instance_scenarios` FOREIGN KEY (`instance_scenario_id`) REFERENCES `instance_scenarios` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+  CONSTRAINT `FK_instance_vnfs_sce_vnfs` FOREIGN KEY (`sce_vnf_id`) REFERENCES `sce_vnfs` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE,
+  CONSTRAINT `FK_instance_vnfs_vnfs` FOREIGN KEY (`vnf_id`) REFERENCES `vnfs` (`uuid`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Instances of VNFs as part of a scenario';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `instance_wim_nets`
+--
+-- NOTE(review): WIM counterpart of instance_nets — same lifecycle
+-- pattern but against a WIM: WIM_ERROR replaces VIM_ERROR in the status
+-- enum, wim_info replaces vim_info, and multipoint defaults to 'false'
+-- here (instance_nets defaults to 'true').
+
+DROP TABLE IF EXISTS `instance_wim_nets`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `instance_wim_nets` (
+  `uuid` varchar(36) NOT NULL,
+  `wim_internal_id` varchar(128) DEFAULT NULL COMMENT 'Internal ID used by the WIM to refer to the network',
+  `instance_scenario_id` varchar(36) DEFAULT NULL,
+  `sce_net_id` varchar(36) DEFAULT NULL,
+  `wim_id` varchar(36) DEFAULT NULL,
+  `wim_account_id` varchar(36) NOT NULL,
+  `status` enum('ACTIVE','INACTIVE','DOWN','BUILD','ERROR','WIM_ERROR','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+  `error_msg` varchar(1024) DEFAULT NULL,
+  `wim_info` text,
+  `multipoint` enum('true','false') NOT NULL DEFAULT 'false',
+  `created` enum('true','false') NOT NULL DEFAULT 'false' COMMENT 'Created or already exists at WIM',
+  `created_at` double NOT NULL,
+  `modified_at` double DEFAULT NULL,
+  PRIMARY KEY (`uuid`),
+  KEY `FK_instance_wim_nets_instance_scenarios` (`instance_scenario_id`),
+  KEY `FK_instance_wim_nets_sce_nets` (`sce_net_id`),
+  KEY `FK_instance_wim_nets_wims` (`wim_id`),
+  KEY `FK_instance_wim_nets_wim_accounts` (`wim_account_id`),
+  CONSTRAINT `FK_instance_wim_nets_instance_scenarios` FOREIGN KEY (`instance_scenario_id`) REFERENCES `instance_scenarios` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+  CONSTRAINT `FK_instance_wim_nets_sce_nets` FOREIGN KEY (`sce_net_id`) REFERENCES `sce_nets` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE,
+  CONSTRAINT `FK_instance_wim_nets_wim_accounts` FOREIGN KEY (`wim_account_id`) REFERENCES `wim_accounts` (`uuid`),
+  CONSTRAINT `FK_instance_wim_nets_wims` FOREIGN KEY (`wim_id`) REFERENCES `wims` (`uuid`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Instances of wim networks';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `interfaces`
+--
+-- NOTE(review): template-level VM interfaces (the deployed counterpart
+-- is instance_interfaces). internal_name is unique per vm_id; both FKs
+-- cascade on delete, so removing a VM or net removes its interfaces.
+
+DROP TABLE IF EXISTS `interfaces`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `interfaces` (
+  `uuid` varchar(36) NOT NULL,
+  `internal_name` varchar(255) NOT NULL,
+  `external_name` varchar(255) DEFAULT NULL,
+  `vm_id` varchar(36) NOT NULL,
+  `net_id` varchar(36) DEFAULT NULL,
+  `type` enum('mgmt','bridge','data') NOT NULL DEFAULT 'data' COMMENT 'Type of network',
+  `vpci` char(12) DEFAULT NULL,
+  `bw` mediumint(8) unsigned DEFAULT NULL COMMENT 'BW expressed in Mbits/s. Maybe this field is not necessary.',
+  `created_at` double NOT NULL,
+  `modified_at` double DEFAULT NULL,
+  `model` varchar(12) DEFAULT NULL,
+  `mac` char(18) DEFAULT NULL,
+  `ip_address` varchar(64) DEFAULT NULL,
+  `floating_ip` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'Indicates if a floating_ip must be associated to this interface',
+  `port_security` tinyint(1) NOT NULL DEFAULT '1' COMMENT 'Indicates if port security must be enabled or disabled. By default it is enabled',
+  PRIMARY KEY (`uuid`),
+  UNIQUE KEY `internal_name_vm_id` (`internal_name`,`vm_id`),
+  KEY `FK_interfaces_vms` (`vm_id`),
+  KEY `FK_interfaces_nets` (`net_id`),
+  CONSTRAINT `FK_interfaces_nets` FOREIGN KEY (`net_id`) REFERENCES `nets` (`uuid`) ON DELETE CASCADE,
+  CONSTRAINT `FK_interfaces_vms` FOREIGN KEY (`vm_id`) REFERENCES `vms` (`uuid`) ON DELETE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='VM interfaces';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `ip_profiles`
+--
+-- NOTE(review): the three owner columns (net_id, sce_net_id,
+-- instance_net_id) are all nullable — each profile belongs to exactly
+-- one of them per the table COMMENT, but nothing in the schema enforces
+-- that exclusivity; it is maintained by application code.
+
+DROP TABLE IF EXISTS `ip_profiles`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `ip_profiles` (
+  `id` int(11) NOT NULL AUTO_INCREMENT,
+  `net_id` varchar(36) DEFAULT NULL,
+  `sce_net_id` varchar(36) DEFAULT NULL,
+  `instance_net_id` varchar(36) DEFAULT NULL,
+  `ip_version` enum('IPv4','IPv6') NOT NULL DEFAULT 'IPv4',
+  `subnet_address` varchar(64) DEFAULT NULL,
+  `gateway_address` varchar(64) DEFAULT NULL,
+  `dns_address` varchar(255) DEFAULT NULL COMMENT 'dns ip list separated by semicolon',
+  `dhcp_enabled` enum('true','false') NOT NULL DEFAULT 'true',
+  `dhcp_start_address` varchar(64) DEFAULT NULL,
+  `dhcp_count` int(11) DEFAULT NULL,
+  `security_group` varchar(255) DEFAULT NULL,
+  PRIMARY KEY (`id`),
+  KEY `FK_ipprofiles_nets` (`net_id`),
+  KEY `FK_ipprofiles_scenets` (`sce_net_id`),
+  KEY `FK_ipprofiles_instancenets` (`instance_net_id`),
+  CONSTRAINT `FK_ipprofiles_instancenets` FOREIGN KEY (`instance_net_id`) REFERENCES `instance_nets` (`uuid`) ON DELETE CASCADE,
+  CONSTRAINT `FK_ipprofiles_nets` FOREIGN KEY (`net_id`) REFERENCES `nets` (`uuid`) ON DELETE CASCADE,
+  CONSTRAINT `FK_ipprofiles_scenets` FOREIGN KEY (`sce_net_id`) REFERENCES `sce_nets` (`uuid`) ON DELETE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Table containing the IP parameters of a network, either a net, a sce_net or and instance_net.';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `logs`
+--
+-- NOTE(review): free-standing log table, no FKs; nfvo_tenant_id/uuid
+-- are plain varchars. AUTO_INCREMENT=3423 is the counter snapshotted by
+-- mysqldump from the source database, not a schema requirement.
+
+DROP TABLE IF EXISTS `logs`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `logs` (
+  `id` int(10) unsigned NOT NULL AUTO_INCREMENT,
+  `created_at` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
+  `nfvo_tenant_id` varchar(36) DEFAULT NULL,
+  `related` varchar(36) NOT NULL COMMENT 'Relevant element for the log',
+  `uuid` varchar(36) DEFAULT NULL COMMENT 'Uuid of vnf, scenario, etc. that log relates to',
+  `level` enum('panic','error','info','debug','verbose') NOT NULL,
+  `description` varchar(200) NOT NULL,
+  PRIMARY KEY (`id`)
+) ENGINE=InnoDB AUTO_INCREMENT=3423 DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `nets`
+--
+-- NOTE(review): VNF-internal networks (see table COMMENT); names are
+-- unique within a VNF and rows cascade away with their VNF.
+
+DROP TABLE IF EXISTS `nets`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `nets` (
+  `uuid` varchar(36) NOT NULL,
+  `osm_id` varchar(255) DEFAULT NULL,
+  `vnf_id` varchar(36) NOT NULL,
+  `name` varchar(255) NOT NULL,
+  `type` enum('bridge','data','ptp') NOT NULL DEFAULT 'data' COMMENT 'Type of network',
+  `multipoint` enum('true','false') NOT NULL DEFAULT 'false',
+  `description` varchar(255) DEFAULT NULL,
+  `created_at` double NOT NULL,
+  `modified_at` double DEFAULT NULL,
+  PRIMARY KEY (`uuid`),
+  UNIQUE KEY `vnf_id_name` (`vnf_id`,`name`),
+  CONSTRAINT `FK_nets_vnfs` FOREIGN KEY (`vnf_id`) REFERENCES `vnfs` (`uuid`) ON DELETE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Networks in a VNF definition. These are only the internal networks among VMs of the same VNF.';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `nfvo_tenants`
+--
+-- NOTE(review): RO tenants with their per-tenant keypair (the private
+-- key is stored encrypted). The table COMMENT 'Scenarios defined by the
+-- user' looks copy-pasted from `scenarios` — cosmetic only, but
+-- misleading in SHOW CREATE TABLE output.
+
+DROP TABLE IF EXISTS `nfvo_tenants`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `nfvo_tenants` (
+  `uuid` varchar(36) NOT NULL,
+  `name` varchar(255) NOT NULL,
+  `description` varchar(255) DEFAULT NULL,
+  `encrypted_RO_priv_key` varchar(2000) DEFAULT NULL,
+  `RO_pub_key` varchar(510) DEFAULT NULL,
+  `created_at` double NOT NULL,
+  `modified_at` double DEFAULT NULL,
+  PRIMARY KEY (`uuid`),
+  UNIQUE KEY `name` (`name`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Scenarios defined by the user';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `sce_classifier_matches`
+--
+-- NOTE(review): 5-tuple match rules of an SFC classifier. Index name
+-- `FK_classifiers_classifier_match` differs from its constraint
+-- `FK_sce_classifiers_classifier_match` — harmless naming drift.
+
+DROP TABLE IF EXISTS `sce_classifier_matches`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `sce_classifier_matches` (
+  `uuid` varchar(36) NOT NULL,
+  `ip_proto` varchar(2) NOT NULL,
+  `source_ip` varchar(16) NOT NULL,
+  `destination_ip` varchar(16) NOT NULL,
+  `source_port` varchar(5) NOT NULL,
+  `destination_port` varchar(5) NOT NULL,
+  `sce_classifier_id` varchar(36) NOT NULL,
+  `created_at` double NOT NULL,
+  `modified_at` double DEFAULT NULL,
+  PRIMARY KEY (`uuid`),
+  KEY `FK_classifiers_classifier_match` (`sce_classifier_id`),
+  CONSTRAINT `FK_sce_classifiers_classifier_match` FOREIGN KEY (`sce_classifier_id`) REFERENCES `sce_classifiers` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `sce_classifiers`
+--
+-- NOTE(review): binds a VNFFG's rendered service path (sce_rsp_id) to
+-- the source sce_vnf/interface whose traffic it classifies; all four
+-- FKs cascade with their parents.
+
+DROP TABLE IF EXISTS `sce_classifiers`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `sce_classifiers` (
+  `uuid` varchar(36) NOT NULL,
+  `tenant_id` varchar(36) DEFAULT NULL,
+  `name` varchar(255) NOT NULL,
+  `sce_vnffg_id` varchar(36) NOT NULL,
+  `sce_rsp_id` varchar(36) NOT NULL,
+  `sce_vnf_id` varchar(36) NOT NULL,
+  `interface_id` varchar(36) NOT NULL,
+  `created_at` double NOT NULL,
+  `modified_at` double DEFAULT NULL,
+  PRIMARY KEY (`uuid`),
+  KEY `FK_sce_vnffgs_classifier` (`sce_vnffg_id`),
+  KEY `FK_sce_rsps_classifier` (`sce_rsp_id`),
+  KEY `FK_sce_vnfs_classifier` (`sce_vnf_id`),
+  KEY `FK_interfaces_classifier` (`interface_id`),
+  CONSTRAINT `FK_interfaces_classifier` FOREIGN KEY (`interface_id`) REFERENCES `interfaces` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+  CONSTRAINT `FK_sce_rsps_classifier` FOREIGN KEY (`sce_rsp_id`) REFERENCES `sce_rsps` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+  CONSTRAINT `FK_sce_vnffgs_classifier` FOREIGN KEY (`sce_vnffg_id`) REFERENCES `sce_vnffgs` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+  CONSTRAINT `FK_sce_vnfs_classifier` FOREIGN KEY (`sce_vnf_id`) REFERENCES `sce_vnfs` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `sce_interfaces`
+--
+-- NOTE(review): attaches a VNF's external interface (interface_id) to a
+-- scenario-level network (sce_net_id). The interfaces FK has no ON
+-- DELETE action, so a referenced interface cannot be removed while a
+-- scenario uses it.
+
+DROP TABLE IF EXISTS `sce_interfaces`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `sce_interfaces` (
+  `uuid` varchar(36) NOT NULL,
+  `sce_vnf_id` varchar(36) NOT NULL,
+  `sce_net_id` varchar(36) DEFAULT NULL,
+  `interface_id` varchar(36) DEFAULT NULL,
+  `ip_address` varchar(64) DEFAULT NULL,
+  `created_at` double NOT NULL,
+  `modified_at` double DEFAULT NULL,
+  PRIMARY KEY (`uuid`),
+  KEY `FK_sce_interfaces_sce_vnfs` (`sce_vnf_id`),
+  KEY `FK_sce_interfaces_sce_nets` (`sce_net_id`),
+  KEY `FK_sce_interfaces_interfaces` (`interface_id`),
+  CONSTRAINT `FK_sce_interfaces_interfaces` FOREIGN KEY (`interface_id`) REFERENCES `interfaces` (`uuid`),
+  CONSTRAINT `FK_sce_interfaces_sce_nets` FOREIGN KEY (`sce_net_id`) REFERENCES `sce_nets` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+  CONSTRAINT `FK_sce_interfaces_sce_vnfs` FOREIGN KEY (`sce_vnf_id`) REFERENCES `sce_vnfs` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='VNF interfaces in a scenario definition.';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `sce_nets`
+--
+-- NOTE(review): scenario-level inter-VNF networks. scenario_id is
+-- nullable on purpose (see column comment: shared across scenarios);
+-- `external`='true' marks nets that pre-exist at the VIM.
+
+DROP TABLE IF EXISTS `sce_nets`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `sce_nets` (
+  `uuid` varchar(36) NOT NULL,
+  `osm_id` varchar(255) DEFAULT NULL,
+  `name` varchar(255) NOT NULL,
+  `scenario_id` varchar(36) DEFAULT NULL COMMENT 'NULL if net is matched to several scenarios',
+  `type` enum('bridge','data','ptp') NOT NULL DEFAULT 'data' COMMENT 'Type of network',
+  `multipoint` enum('true','false') NOT NULL DEFAULT 'true',
+  `external` enum('true','false') NOT NULL DEFAULT 'false' COMMENT 'If external, net is already present at VIM',
+  `description` varchar(255) DEFAULT NULL,
+  `vim_network_name` varchar(255) DEFAULT NULL,
+  `created_at` double NOT NULL,
+  `modified_at` double DEFAULT NULL,
+  `graph` varchar(2000) DEFAULT NULL,
+  PRIMARY KEY (`uuid`),
+  KEY `FK_sce_nets_scenarios` (`scenario_id`),
+  CONSTRAINT `FK_sce_nets_scenarios` FOREIGN KEY (`scenario_id`) REFERENCES `scenarios` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Networks in a scenario definition. It only considers networks among VNFs. Networks among internal VMs are only considered in tble ''nets''.';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `sce_rsp_hops`
+--
+-- NOTE(review): ordered hops (if_order) of a rendered service path,
+-- each hop naming the ingress and egress interface of one sce_vnf.
+-- The ingress index is named `FK_interfaces_rsp_hop` while its
+-- constraint is `FK_interfaces_rsp_hop_ingress` — naming drift only.
+
+DROP TABLE IF EXISTS `sce_rsp_hops`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `sce_rsp_hops` (
+  `uuid` varchar(36) NOT NULL,
+  `if_order` int(11) NOT NULL DEFAULT '0',
+  `ingress_interface_id` varchar(36) NOT NULL,
+  `egress_interface_id` varchar(36) NOT NULL,
+  `sce_vnf_id` varchar(36) NOT NULL,
+  `sce_rsp_id` varchar(36) NOT NULL,
+  `created_at` double NOT NULL,
+  `modified_at` double DEFAULT NULL,
+  PRIMARY KEY (`uuid`),
+  KEY `FK_interfaces_rsp_hop` (`ingress_interface_id`),
+  KEY `FK_sce_vnfs_rsp_hop` (`sce_vnf_id`),
+  KEY `FK_sce_rsps_rsp_hop` (`sce_rsp_id`),
+  KEY `FK_interfaces_rsp_hop_egress` (`egress_interface_id`),
+  CONSTRAINT `FK_interfaces_rsp_hop_egress` FOREIGN KEY (`egress_interface_id`) REFERENCES `interfaces` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+  CONSTRAINT `FK_interfaces_rsp_hop_ingress` FOREIGN KEY (`ingress_interface_id`) REFERENCES `interfaces` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+  CONSTRAINT `FK_sce_rsps_rsp_hop` FOREIGN KEY (`sce_rsp_id`) REFERENCES `sce_rsps` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+  CONSTRAINT `FK_sce_vnfs_rsp_hop` FOREIGN KEY (`sce_vnf_id`) REFERENCES `sce_vnfs` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `sce_rsps`
+--
+-- NOTE(review): rendered service paths, one per VNFFG; hops live in
+-- sce_rsp_hops and cascade away with their path.
+
+DROP TABLE IF EXISTS `sce_rsps`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `sce_rsps` (
+  `uuid` varchar(36) NOT NULL,
+  `tenant_id` varchar(36) DEFAULT NULL,
+  `name` varchar(255) NOT NULL,
+  `sce_vnffg_id` varchar(36) NOT NULL,
+  `created_at` double NOT NULL,
+  `modified_at` double DEFAULT NULL,
+  PRIMARY KEY (`uuid`),
+  KEY `FK_sce_vnffgs_rsp` (`sce_vnffg_id`),
+  CONSTRAINT `FK_sce_vnffgs_rsp` FOREIGN KEY (`sce_vnffg_id`) REFERENCES `sce_vnffgs` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `sce_vnffgs`
+--
+-- NOTE(review): SUSPECT CONSTRAINT — `FK_scenarios_vnffg` is declared
+-- on `tenant_id` yet references `scenarios`(`uuid`); the indexed
+-- `scenario_id` column has no constraint at all. This looks like the FK
+-- was meant for `scenario_id` (and tenant_id for nfvo_tenants, as in
+-- sibling tables). Do not "fix" here without a coordinated schema
+-- migration: this dump is the baseline matching deployed databases.
+
+DROP TABLE IF EXISTS `sce_vnffgs`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `sce_vnffgs` (
+  `uuid` varchar(36) NOT NULL,
+  `tenant_id` varchar(36) DEFAULT NULL,
+  `name` varchar(255) NOT NULL,
+  `description` varchar(255) DEFAULT NULL,
+  `vendor` varchar(255) DEFAULT NULL,
+  `scenario_id` varchar(36) NOT NULL,
+  `created_at` double NOT NULL,
+  `modified_at` double DEFAULT NULL,
+  PRIMARY KEY (`uuid`),
+  KEY `FK_scenarios_sce_vnffg` (`scenario_id`),
+  KEY `FK_scenarios_vnffg` (`tenant_id`),
+  CONSTRAINT `FK_scenarios_vnffg` FOREIGN KEY (`tenant_id`) REFERENCES `scenarios` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `sce_vnfs`
+--
+-- NOTE(review): member VNFs of a scenario; member_vnf_index carries the
+-- OSM descriptor index. vnf_id has a plain FK (no ON DELETE), so VNF
+-- templates in use by a scenario cannot be deleted.
+
+DROP TABLE IF EXISTS `sce_vnfs`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `sce_vnfs` (
+  `uuid` varchar(36) NOT NULL,
+  `member_vnf_index` varchar(255) DEFAULT NULL,
+  `name` varchar(255) NOT NULL,
+  `scenario_id` varchar(36) NOT NULL,
+  `vnf_id` varchar(36) NOT NULL,
+  `description` varchar(255) DEFAULT NULL,
+  `created_at` double NOT NULL,
+  `modified_at` double DEFAULT NULL,
+  `graph` varchar(2000) DEFAULT NULL,
+  PRIMARY KEY (`uuid`),
+  UNIQUE KEY `name_scenario_id` (`name`,`scenario_id`),
+  KEY `FK_sce_vnfs_scenarios` (`scenario_id`),
+  KEY `FK_sce_vnfs_vnfs` (`vnf_id`),
+  CONSTRAINT `FK_sce_vnfs_scenarios` FOREIGN KEY (`scenario_id`) REFERENCES `scenarios` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+  CONSTRAINT `FK_sce_vnfs_vnfs` FOREIGN KEY (`vnf_id`) REFERENCES `vnfs` (`uuid`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='VNFs in scenario definitions. This table also contains the Physical Network Functions and the external elements such as MAN, Core, etc.\r\n';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `scenarios`
+--
+
+DROP TABLE IF EXISTS `scenarios`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+-- Catalogue of user-defined scenarios (network services). Timestamps across
+-- this schema (`created_at`/`modified_at`) are stored as epoch doubles.
+CREATE TABLE `scenarios` (
+  `uuid` varchar(36) NOT NULL,
+  `osm_id` varchar(255) DEFAULT NULL,
+  `name` varchar(255) NOT NULL,
+  `short_name` varchar(255) DEFAULT NULL,
+  `tenant_id` varchar(36) DEFAULT NULL,
+  `description` varchar(255) DEFAULT NULL,
+  `vendor` varchar(255) DEFAULT NULL,
+  `public` enum('true','false') NOT NULL DEFAULT 'false',
+  `created_at` double NOT NULL,
+  `modified_at` double DEFAULT NULL,
+  `descriptor` text COMMENT 'Original text descriptor used for create the scenario',
+  `cloud_config` mediumtext,
+  PRIMARY KEY (`uuid`),
+  UNIQUE KEY `osm_id_tenant_id` (`osm_id`,`tenant_id`),
+  KEY `FK_scenarios_nfvo_tenants` (`tenant_id`),
+  CONSTRAINT `FK_scenarios_nfvo_tenants` FOREIGN KEY (`tenant_id`) REFERENCES `nfvo_tenants` (`uuid`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Scenarios defined by the user';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `schema_version`
+--
+
+DROP TABLE IF EXISTS `schema_version`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+-- Version bookkeeping table read/written by the migration tooling; per the
+-- column comment, `version_int` must form a gapless sequence.
+CREATE TABLE `schema_version` (
+  `version_int` int(11) NOT NULL COMMENT 'version as a number. Must not contain gaps',
+  `version` varchar(20) NOT NULL COMMENT 'version as a text',
+  `openmano_ver` varchar(20) NOT NULL COMMENT 'openmano version',
+  `comments` varchar(2000) DEFAULT NULL COMMENT 'changes to database',
+  `date` date DEFAULT NULL,
+  PRIMARY KEY (`version_int`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='database schema control version';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `tenants_datacenters`
+--
+
+DROP TABLE IF EXISTS `tenants_datacenters`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+-- Association of NFVO tenants with datacenters, recording which
+-- datacenter_tenant (VIM account) each pairing uses.
+CREATE TABLE `tenants_datacenters` (
+  `id` int(11) NOT NULL AUTO_INCREMENT,
+  `nfvo_tenant_id` varchar(36) NOT NULL,
+  `datacenter_id` varchar(36) NOT NULL,
+  `datacenter_tenant_id` varchar(36) NOT NULL,
+  `created_at` double NOT NULL,
+  `modified_at` double DEFAULT NULL,
+  PRIMARY KEY (`id`),
+  UNIQUE KEY `datacenter_nfvo_tenant` (`datacenter_id`,`nfvo_tenant_id`),
+  KEY `FK_nfvo_tenants_datacenters` (`datacenter_id`),
+  KEY `FK_nfvo_tenants_vim_tenants` (`datacenter_tenant_id`),
+  KEY `FK_tenants_datacenters_nfvo_tenants` (`nfvo_tenant_id`),
+  CONSTRAINT `FK_tenants_datacenters_datacenter_tenants` FOREIGN KEY (`datacenter_tenant_id`) REFERENCES `datacenter_tenants` (`uuid`),
+  CONSTRAINT `FK_tenants_datacenters_datacenters` FOREIGN KEY (`datacenter_id`) REFERENCES `datacenters` (`uuid`),
+  CONSTRAINT `FK_tenants_datacenters_nfvo_tenants` FOREIGN KEY (`nfvo_tenant_id`) REFERENCES `nfvo_tenants` (`uuid`)
+) ENGINE=InnoDB AUTO_INCREMENT=86 DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Mapping of NFVO tenants to datacenters and the datacenter tenant (VIM account) used';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `uuids`
+--
+
+DROP TABLE IF EXISTS `uuids`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+-- Global UUID registry; `used_at` only records the owning table's NAME, so
+-- there is (intentionally) no FK from here back to the owning rows.
+CREATE TABLE `uuids` (
+  `uuid` varchar(36) NOT NULL,
+  `root_uuid` varchar(36) DEFAULT NULL COMMENT 'Some related UUIDs can be grouped by this field, so that they can be deleted at once',
+  `created_at` double NOT NULL,
+  `used_at` varchar(36) DEFAULT NULL COMMENT 'Table that uses this UUID',
+  PRIMARY KEY (`uuid`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Table with all unique IDs used to avoid UUID repetitions among different elements';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `vim_wim_actions`
+--
+
+DROP TABLE IF EXISTS `vim_wim_actions`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+-- Per-VIM/WIM task rows; note the PK column order is (task_index,
+-- instance_action_id), with a secondary index on instance_action_id alone.
+CREATE TABLE `vim_wim_actions` (
+  `instance_action_id` varchar(36) NOT NULL,
+  `task_index` int(6) NOT NULL,
+  `datacenter_vim_id` varchar(36) DEFAULT NULL,
+  `vim_id` varchar(64) DEFAULT NULL,
+  `wim_account_id` varchar(36) DEFAULT NULL,
+  `wim_internal_id` varchar(64) DEFAULT NULL,
+  `action` varchar(36) NOT NULL COMMENT 'CREATE,DELETE,START,STOP...',
+  `item` enum('datacenters_flavors','datacenter_images','instance_nets','instance_vms','instance_interfaces','instance_wim_nets') NOT NULL COMMENT 'table where the item is stored',
+  `item_id` varchar(36) DEFAULT NULL COMMENT 'uuid of the entry in the table',
+  `status` enum('SCHEDULED','BUILD','DONE','FAILED','SUPERSEDED') NOT NULL DEFAULT 'SCHEDULED',
+  `extra` text COMMENT 'json with params:, depends_on: for the task',
+  `error_msg` varchar(1024) DEFAULT NULL,
+  `created_at` double NOT NULL,
+  `modified_at` double DEFAULT NULL,
+  PRIMARY KEY (`task_index`,`instance_action_id`),
+  KEY `FK_actions_instance_actions` (`instance_action_id`),
+  KEY `FK_actions_vims` (`datacenter_vim_id`),
+  KEY `item_type_id` (`item`,`item_id`),
+  KEY `FK_actions_wims` (`wim_account_id`),
+  CONSTRAINT `FK_actions_instance_actions` FOREIGN KEY (`instance_action_id`) REFERENCES `instance_actions` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+  CONSTRAINT `FK_actions_vims` FOREIGN KEY (`datacenter_vim_id`) REFERENCES `datacenter_tenants` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+  CONSTRAINT `FK_actions_wims` FOREIGN KEY (`wim_account_id`) REFERENCES `wim_accounts` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Table with the individual VIM actions.';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `vms`
+--
+
+DROP TABLE IF EXISTS `vms`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+-- VNFC definitions. `image_id` is nullable (schema_version 36 below: 'Allow
+-- vm without image_id for PDUs') while `flavor_id` remains mandatory.
+CREATE TABLE `vms` (
+  `uuid` varchar(36) NOT NULL,
+  `osm_id` varchar(255) DEFAULT NULL,
+  `pdu_type` varchar(255) DEFAULT NULL,
+  `name` varchar(255) NOT NULL,
+  `vnf_id` varchar(36) NOT NULL,
+  `count` smallint(6) NOT NULL DEFAULT '1',
+  `flavor_id` varchar(36) NOT NULL COMMENT 'Link to flavor table',
+  `image_id` varchar(36) DEFAULT NULL COMMENT 'Link to image table',
+  `image_list` text COMMENT 'Alternative images',
+  `image_path` varchar(100) DEFAULT NULL COMMENT 'Path where the image of the VM is located',
+  `boot_data` text,
+  `description` varchar(255) DEFAULT NULL,
+  `created_at` double NOT NULL,
+  `modified_at` double DEFAULT NULL,
+  `availability_zone` varchar(255) DEFAULT NULL,
+  PRIMARY KEY (`uuid`),
+  UNIQUE KEY `name_vnf_id` (`name`,`vnf_id`),
+  KEY `FK_vms_vnfs` (`vnf_id`),
+  KEY `FK_vms_images` (`image_id`),
+  KEY `FK_vms_flavors` (`flavor_id`),
+  CONSTRAINT `FK_vms_flavors` FOREIGN KEY (`flavor_id`) REFERENCES `flavors` (`uuid`),
+  CONSTRAINT `FK_vms_images` FOREIGN KEY (`image_id`) REFERENCES `images` (`uuid`),
+  CONSTRAINT `FK_vms_vnfs` FOREIGN KEY (`vnf_id`) REFERENCES `vnfs` (`uuid`) ON DELETE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='VM definitions. It contains the set of VMs used by the VNF definitions.';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `vnfs`
+--
+
+DROP TABLE IF EXISTS `vnfs`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+-- VNF catalogue. Deleting a tenant orphans (SET NULL) its VNFs rather than
+-- cascading the delete — see the FK at the bottom.
+CREATE TABLE `vnfs` (
+  `uuid` varchar(36) NOT NULL,
+  `osm_id` varchar(255) DEFAULT NULL,
+  `name` varchar(255) NOT NULL,
+  `short_name` varchar(255) DEFAULT NULL,
+  `tenant_id` varchar(36) DEFAULT NULL,
+  `physical` enum('true','false') NOT NULL DEFAULT 'false',
+  `public` enum('true','false') NOT NULL DEFAULT 'false',
+  `description` varchar(255) DEFAULT NULL,
+  `vendor` varchar(255) DEFAULT NULL,
+  `mgmt_access` varchar(2000) DEFAULT NULL,
+  `created_at` double NOT NULL,
+  `modified_at` double DEFAULT NULL,
+  `class` varchar(36) DEFAULT 'MISC',
+  `descriptor` text COMMENT 'Original text descriptor used for create the VNF',
+  PRIMARY KEY (`uuid`),
+  UNIQUE KEY `osm_id_tenant_id` (`osm_id`,`tenant_id`),
+  KEY `FK_vnfs_nfvo_tenants` (`tenant_id`),
+  CONSTRAINT `FK_vnfs_nfvo_tenants` FOREIGN KEY (`tenant_id`) REFERENCES `nfvo_tenants` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='VNF definitions. This is the catalogue of VNFs. It also includes Physical Network Functions or Physical Elements.\r\n';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `wim_accounts`
+--
+
+DROP TABLE IF EXISTS `wim_accounts`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+-- Per-WIM credential sets; uniqueness is on (wim_id, name).
+CREATE TABLE `wim_accounts` (
+  `uuid` varchar(36) NOT NULL,
+  `name` varchar(255) DEFAULT NULL,
+  `wim_id` varchar(36) NOT NULL,
+  `created` enum('true','false') NOT NULL DEFAULT 'false',
+  `user` varchar(64) DEFAULT NULL,
+  `password` varchar(64) DEFAULT NULL,
+  `config` text,
+  `created_at` double NOT NULL,
+  `modified_at` double DEFAULT NULL,
+  PRIMARY KEY (`uuid`),
+  UNIQUE KEY `wim_name` (`wim_id`,`name`),
+  KEY `FK_wim_accounts_wims` (`wim_id`),
+  CONSTRAINT `FK_wim_accounts_wims` FOREIGN KEY (`wim_id`) REFERENCES `wims` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='WIM accounts by the user';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `wim_nfvo_tenants`
+--
+
+DROP TABLE IF EXISTS `wim_nfvo_tenants`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+-- NOTE(review): KEY `FK_wims_nfvo_tenants` duplicates the leading column of
+-- the unique key `wim_nfvo_tenant`; presumably redundant — harmless, verify.
+CREATE TABLE `wim_nfvo_tenants` (
+  `id` int(11) NOT NULL AUTO_INCREMENT,
+  `nfvo_tenant_id` varchar(36) NOT NULL,
+  `wim_id` varchar(36) NOT NULL,
+  `wim_account_id` varchar(36) NOT NULL,
+  `created_at` double NOT NULL,
+  `modified_at` double DEFAULT NULL,
+  PRIMARY KEY (`id`),
+  UNIQUE KEY `wim_nfvo_tenant` (`wim_id`,`nfvo_tenant_id`),
+  KEY `FK_wims_nfvo_tenants` (`wim_id`),
+  KEY `FK_wim_accounts_nfvo_tenants` (`wim_account_id`),
+  KEY `FK_nfvo_tenants_wim_accounts` (`nfvo_tenant_id`),
+  CONSTRAINT `FK_nfvo_tenants_wim_accounts` FOREIGN KEY (`nfvo_tenant_id`) REFERENCES `nfvo_tenants` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+  CONSTRAINT `FK_wim_accounts_nfvo_tenants` FOREIGN KEY (`wim_account_id`) REFERENCES `wim_accounts` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+  CONSTRAINT `FK_wims_nfvo_tenants` FOREIGN KEY (`wim_id`) REFERENCES `wims` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB AUTO_INCREMENT=86 DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='WIM accounts mapping to NFVO tenants';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `wim_port_mappings`
+--
+
+DROP TABLE IF EXISTS `wim_port_mappings`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+-- NOTE(review): index name `FK_wims_wim_physical_connections` does not match
+-- constraint `FK_wims_wim_port_mappings` — looks like a rename leftover.
+CREATE TABLE `wim_port_mappings` (
+  `id` int(11) NOT NULL AUTO_INCREMENT,
+  `wim_id` varchar(36) NOT NULL,
+  `datacenter_id` varchar(36) NOT NULL,
+  `pop_switch_dpid` varchar(64) NOT NULL,
+  `pop_switch_port` varchar(64) NOT NULL,
+  `wan_service_endpoint_id` varchar(256) NOT NULL COMMENT 'this field contains a unique identifier used to check the mapping_info consistency',
+  `wan_service_mapping_info` text,
+  `created_at` double NOT NULL,
+  `modified_at` double DEFAULT NULL,
+  PRIMARY KEY (`id`),
+  UNIQUE KEY `unique_datacenter_port_mapping` (`datacenter_id`,`pop_switch_dpid`,`pop_switch_port`),
+  UNIQUE KEY `unique_wim_port_mapping` (`wim_id`,`wan_service_endpoint_id`),
+  KEY `FK_wims_wim_physical_connections` (`wim_id`),
+  KEY `FK_datacenters_wim_port_mappings` (`datacenter_id`),
+  CONSTRAINT `FK_datacenters_wim_port_mappings` FOREIGN KEY (`datacenter_id`) REFERENCES `datacenters` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+  CONSTRAINT `FK_wims_wim_port_mappings` FOREIGN KEY (`wim_id`) REFERENCES `wims` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='WIM port mappings managed by the WIM.';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `wims`
+--
+
+DROP TABLE IF EXISTS `wims`;
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+-- Registry of WIM controllers; names are globally unique.
+CREATE TABLE `wims` (
+  `uuid` varchar(36) NOT NULL,
+  `name` varchar(255) NOT NULL,
+  `description` varchar(255) DEFAULT NULL,
+  `type` varchar(36) NOT NULL DEFAULT 'odl',
+  `wim_url` varchar(150) NOT NULL,
+  `config` text,
+  `created_at` double NOT NULL,
+  `modified_at` double DEFAULT NULL,
+  PRIMARY KEY (`uuid`),
+  UNIQUE KEY `name` (`name`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='WIMs managed by the NFVO.';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Dumping routines for database 'mano_db'
+--
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
+/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
+/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2018-12-10  9:58:03
+
+
+
+
+
+-- A second, data-only dump is concatenated below the schema dump above; the
+-- Host line uses a {{mano_db}} template placeholder filled at deploy time.
+-- MySQL dump 10.13  Distrib 5.7.24, for Linux (x86_64)
+--
+-- Host: localhost    Database: {{mano_db}}
+-- ------------------------------------------------------
+-- Server version      5.7.24
+
+/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
+/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
+/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
+/*!40101 SET NAMES utf8 */;
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+
+--
+-- Dumping data for table `schema_version`
+--
+
+-- Seeds migration history up to version 36 (0.6.03); newer versions are
+-- presumably applied by the migration script — verify against its code.
+LOCK TABLES `schema_version` WRITE;
+/*!40000 ALTER TABLE `schema_version` DISABLE KEYS */;
+INSERT INTO `schema_version` VALUES
+(0,'0.0','0.0.0','Database in init process','2015-05-08'),
+(1,'0.1','0.2.2','insert schema_version','2015-05-08'),
+(2,'0.2','0.2.5','new tables images,flavors','2015-07-13'),
+(3,'0.3','0.3.3','alter vim_tenant tables','2015-07-28'),
+(4,'0.4','0.3.5','enlarge graph field at sce_vnfs/nets','2015-10-20'),
+(5,'0.5','0.4.1','Add mac address for bridge interfaces','2015-12-14'),
+(6,'0.6','0.4.2','Adding VIM status info','2015-12-22'),
+(7,'0.7','0.4.3','Changing created_at time at database','2016-01-25'),
+(8,'0.8','0.4.32','Enlarging name at database','2016-02-01'),
+(9,'0.9','0.4.33','Add ACTIVE:NoMgmtIP to instance_vms table','2016-02-05'),
+(10,'0.10','0.4.36','tenant management of vnfs,scenarios','2016-03-08'),
+(11,'0.11','0.4.43','remove unique name at scenarios,instance_scenarios','2016-07-18'),
+(12,'0.12','0.4.46','create ip_profiles table, with foreign keys to all nets tables, and add ip_address column to interfaces and sce_interfaces','2016-08-29'),
+(13,'0.13','0.4.47','insert cloud-config at scenarios,instance_scenarios','2016-08-30'),
+(14,'0.14','0.4.57','remove unique index vim_net_id, instance_scenario_id','2016-09-26'),
+(15,'0.15','0.4.59','add columns universal_name and checksum at table images, add unique index universal_name_checksum, and change location to allow NULL; change column image_path in table vms to allow NULL','2016-09-27'),
+(16,'0.16','0.5.2','enlarge vim_tenant_name and id. New config at datacenter_tenants','2016-10-11'),
+(17,'0.17','0.5.3','Extra description json format of additional devices in datacenter_flavors','2016-12-20'),
+(18,'0.18','0.5.4','Add columns \'floating_ip\' and \'port_security\' at tables \'interfaces\' and \'instance_interfaces\'','2017-01-09'),
+(19,'0.19','0.5.5','Extra Boot-data content at VNFC (vms)','2017-01-11'),
+(20,'0.20','0.5.9','Added columns to store dataplane connectivity info','2017-03-13'),
+(21,'0.21','0.5.15','Edit instance_nets to allow instance_scenario_id=None and enlarge column dns_address at table ip_profiles','2017-06-02'),
+(22,'0.22','0.5.16','Changed type of ram in flavors from SMALLINT to MEDIUMINT','2017-06-02'),
+(23,'0.23','0.5.20','Changed type of ram in flavors from SMALLINT to MEDIUMINT','2017-08-29'),
+(24,'0.24','0.5.21','Added vnfd fields','2017-08-29'),
+(25,'0.25','0.5.22','Added osm_id to vnfs,scenarios','2017-09-01'),
+(26,'0.26','0.5.23','Several changes','2017-09-09'),
+(27,'0.27','0.5.25','Added encrypted_RO_priv_key,RO_pub_key to table nfvo_tenants','2017-09-29'),
+(28,'0.28','0.5.28','Adding VNFFG-related tables','2017-11-20'),
+(29,'0.29','0.5.59','Change member_vnf_index to str accordingly to the model','2018-04-11'),
+(30,'0.30','0.5.60','Add image_list to vms','2018-04-24'),
+(31,'0.31','0.5.61','Add vim_network_name to sce_nets','2018-05-03'),
+(32,'0.32','0.5.70','Add vim_name to instance vms','2018-06-28'),
+(33,'0.33','0.5.82','Add pdu information to vms','2018-11-13'),
+(34,'0.34','0.6.00','Added WIM tables','2018-09-10'),
+(35,'0.35','0.6.02','Adding ingress and egress ports for RSPs','2018-12-11'),
+(36,'0.36','0.6.03','Allow vm without image_id for PDUs','2018-12-19');
+/*!40000 ALTER TABLE `schema_version` ENABLE KEYS */;
+UNLOCK TABLES;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
+/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
+/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2018-12-10  9:58:03
diff --git a/RO/osm_ro/database_utils/migrate_mano_db.sh b/RO/osm_ro/database_utils/migrate_mano_db.sh
new file mode 100755 (executable)
index 0000000..096a21a
--- /dev/null
@@ -0,0 +1,1581 @@
+#!/bin/bash
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+#
+#Upgrade/Downgrade openmano database preserving the content
+#
+# Absolute directory containing this script.
+DBUTILS="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+
+# Connection defaults; may be overridden by command-line options below or by
+# the interactive credential prompt.
+DBUSER="mano"
+DBPASS=""
+DEFAULT_DBPASS="manopw"
+DBHOST=""
+DBPORT="3306"
+DBNAME="mano_db"
+QUIET_MODE=""
+BACKUP_DIR=""
+BACKUP_FILE=""
+#TODO update it with the last database version
+LAST_DB_VERSION=39
+
+# Detect paths
+# NOTE(review): MYSQL/AWK/GREP are resolved here but the visible code below
+# invokes 'mysql' directly; confirm these variables are used later on.
+MYSQL=$(which mysql)
+AWK=$(which awk)
+GREP=$(which grep)
+
+function usage(){
+    echo -e "Usage: $0 OPTIONS [version]"
+    echo -e "  Upgrades/Downgrades openmano database preserving the content."\
+            "If [version]  is not provided, it is upgraded to the last version"
+    echo -e "  OPTIONS"
+    echo -e "     -u USER  database user. '$DBUSER' by default. Prompts if DB access fails"
+    echo -e "     -p PASS  database password. If missing it tries without and '$DEFAULT_DBPASS' password before prompting"
+    echo -e "     -P PORT  database port. '$DBPORT' by default"
+    echo -e "     -h HOST  database host. 'localhost' by default"
+    echo -e "     -d NAME  database name. '$DBNAME' by default.  Prompts if DB access fails"
+    echo -e "     -b DIR   backup folder where to create rollback backup file"
+    echo -e "     -q --quiet: Do not prompt for credentials and exit if cannot access to database"
+    echo -e "     --help   shows this help"
+}
+
+# Parse command-line options. Long options (--help/--quiet) are handled via
+# the '-' entry of getopts, which receives the long name in OPTARG.
+while getopts ":u:p:b:P:h:d:q-:" o; do
+    case "${o}" in
+        u)
+            DBUSER="$OPTARG"
+            ;;
+        p)
+            DBPASS="$OPTARG"
+            ;;
+        P)
+            DBPORT="$OPTARG"
+            ;;
+        d)
+            DBNAME="$OPTARG"
+            ;;
+        h)
+            DBHOST="$OPTARG"
+            ;;
+        b)
+            BACKUP_DIR="$OPTARG"
+            ;;
+        q)
+            export QUIET_MODE=yes
+            ;;
+        -)
+            [ "${OPTARG}" == "help" ] && usage && exit 0
+            [ "${OPTARG}" == "quiet" ] && export QUIET_MODE=yes && continue
+            echo "Invalid option: '--$OPTARG'. Type --help for more information" >&2
+            exit 1
+            ;;
+        \?)
+            echo "Invalid option: '-$OPTARG'. Type --help for more information" >&2
+            exit 1
+            ;;
+        :)
+            echo "Option '-$OPTARG' requires an argument. Type --help for more information" >&2
+            exit 1
+            ;;
+        *)
+            usage >&2
+            exit 1
+            ;;
+    esac
+done
+shift $((OPTIND-1))
+
+# Optional positional argument: target schema version (defaults to latest).
+DB_VERSION=$1
+
+if [ -n "$DB_VERSION" ] ; then
+    # check it is a number and an allowed one
+    # Idiom: '[ x -eq x ]' fails for non-integers; then '! echo' turns the
+    # (successful) error print into a failure so '|| exit 1' fires.
+    [ "$DB_VERSION" -eq "$DB_VERSION" ] 2>/dev/null || 
+        ! echo "parameter 'version' requires a integer value" >&2 || exit 1
+    if [ "$DB_VERSION" -lt 0 ] || [ "$DB_VERSION" -gt "$LAST_DB_VERSION" ] ; then
+        echo "parameter 'version' requires a valid database version between '0' and '$LAST_DB_VERSION'"\
+             "If you need an upper version, get a newer version of this script '$0'" >&2
+        exit 1
+    fi
+else
+    DB_VERSION="$LAST_DB_VERSION"
+fi
+
+# Creating temporary file
+# Credentials are passed to mysql via --defaults-extra-file so the password
+# never appears on the process command line; the trap removes it on exit.
+TEMPFILE="$(mktemp -q --tmpdir "migratemanodb.XXXXXX")"
+trap 'rm -f "$TEMPFILE"' EXIT
+chmod 0600 "$TEMPFILE"
+DEF_EXTRA_FILE_PARAM="--defaults-extra-file=$TEMPFILE"
+echo -e "[client]\n user='${DBUSER}'\n password='$DBPASS'\n host='$DBHOST'\n port='$DBPORT'" > "$TEMPFILE"
+
+# Check and ask for database user password
+# Loop until a trial connection succeeds. Redirection order '2>&1 >/dev/null'
+# captures only stderr into DB_ERROR while discarding stdout.
+FIRST_TRY="yes"
+while ! DB_ERROR=`mysql "$DEF_EXTRA_FILE_PARAM" $DBNAME -e "quit" 2>&1 >/dev/null`
+do
+    # if password is not provided, try silently with $DEFAULT_DBPASS before exit or prompt for credentials
+    [[ -n "$FIRST_TRY" ]] && [[ -z "$DBPASS" ]] && DBPASS="$DEFAULT_DBPASS" &&
+        echo -e "[client]\n user='${DBUSER}'\n password='$DBPASS'\n host='$DBHOST'\n port='$DBPORT'" > "$TEMPFILE" &&
+        continue
+    echo "$DB_ERROR"
+    # In quiet mode never prompt: fail fast with a non-zero exit.
+    [[ -n "$QUIET_MODE" ]] && echo -e "Invalid database credentials!!!" >&2 && exit 1
+    echo -e "Provide database name and credentials (Ctrl+c to abort):"
+    read -e -p "    mysql database name($DBNAME): " KK
+    [ -n "$KK" ] && DBNAME="$KK"
+    read -e -p "    mysql user($DBUSER): " KK
+    [ -n "$KK" ] && DBUSER="$KK"
+    read -e -s -p "    mysql password: " DBPASS
+    echo -e "[client]\n user='${DBUSER}'\n password='$DBPASS'\n host='$DBHOST'\n port='$DBPORT'" > "$TEMPFILE"
+    FIRST_TRY=""
+    echo
+done
+
+# DBCMD is expanded unquoted below on purpose so it word-splits into the
+# mysql binary plus its arguments.
+DBCMD="mysql $DEF_EXTRA_FILE_PARAM $DBNAME"
+#echo DBCMD $DBCMD
+
+#check that the database seems a openmano database
+if ! echo -e "show create table vnfs;\nshow create table scenarios" | $DBCMD >/dev/null 2>&1
+then
+    echo "    database $DBNAME does not seem to be an openmano database" >&2
+    exit 1;
+fi
+
+#GET DATABASE TARGET VERSION
+#DB_VERSION=0
+#[ $OPENMANO_VER_NUM -ge 2002 ] && DB_VERSION=1   #0.2.2 =>  1
+#[ $OPENMANO_VER_NUM -ge 2005 ] && DB_VERSION=2   #0.2.5 =>  2
+#[ $OPENMANO_VER_NUM -ge 3003 ] && DB_VERSION=3   #0.3.3 =>  3
+#[ $OPENMANO_VER_NUM -ge 3005 ] && DB_VERSION=4   #0.3.5 =>  4
+#[ $OPENMANO_VER_NUM -ge 4001 ] && DB_VERSION=5   #0.4.1 =>  5
+#[ $OPENMANO_VER_NUM -ge 4002 ] && DB_VERSION=6   #0.4.2 =>  6
+#[ $OPENMANO_VER_NUM -ge 4003 ] && DB_VERSION=7   #0.4.3 =>  7
+#[ $OPENMANO_VER_NUM -ge 4032 ] && DB_VERSION=8   #0.4.32=>  8
+#[ $OPENMANO_VER_NUM -ge 4033 ] && DB_VERSION=9   #0.4.33=>  9
+#[ $OPENMANO_VER_NUM -ge 4036 ] && DB_VERSION=10  #0.4.36=>  10
+#[ $OPENMANO_VER_NUM -ge 4043 ] && DB_VERSION=11  #0.4.43=>  11
+#[ $OPENMANO_VER_NUM -ge 4046 ] && DB_VERSION=12  #0.4.46=>  12
+#[ $OPENMANO_VER_NUM -ge 4047 ] && DB_VERSION=13  #0.4.47=>  13
+#[ $OPENMANO_VER_NUM -ge 4057 ] && DB_VERSION=14  #0.4.57=>  14
+#[ $OPENMANO_VER_NUM -ge 4059 ] && DB_VERSION=15  #0.4.59=>  15
+#[ $OPENMANO_VER_NUM -ge 5002 ] && DB_VERSION=16  #0.5.2 =>  16
+#[ $OPENMANO_VER_NUM -ge 5003 ] && DB_VERSION=17  #0.5.3 =>  17
+#[ $OPENMANO_VER_NUM -ge 5004 ] && DB_VERSION=18  #0.5.4 =>  18
+#[ $OPENMANO_VER_NUM -ge 5005 ] && DB_VERSION=19  #0.5.5 =>  19
+#[ $OPENMANO_VER_NUM -ge 5009 ] && DB_VERSION=20  #0.5.9 =>  20
+#[ $OPENMANO_VER_NUM -ge 5015 ] && DB_VERSION=21  #0.5.15 =>  21
+#[ $OPENMANO_VER_NUM -ge 5016 ] && DB_VERSION=22  #0.5.16 =>  22
+#[ $OPENMANO_VER_NUM -ge 5020 ] && DB_VERSION=23  #0.5.20 =>  23
+#[ $OPENMANO_VER_NUM -ge 5021 ] && DB_VERSION=24  #0.5.21 =>  24
+#[ $OPENMANO_VER_NUM -ge 5022 ] && DB_VERSION=25  #0.5.22 =>  25
+#[ $OPENMANO_VER_NUM -ge 5024 ] && DB_VERSION=26  #0.5.24 =>  26
+#[ $OPENMANO_VER_NUM -ge 5025 ] && DB_VERSION=27  #0.5.25 =>  27
+#[ $OPENMANO_VER_NUM -ge 5052 ] && DB_VERSION=28  #0.5.52 =>  28
+#[ $OPENMANO_VER_NUM -ge 5059 ] && DB_VERSION=29  #0.5.59 =>  29
+#[ $OPENMANO_VER_NUM -ge 5060 ] && DB_VERSION=30  #0.5.60 =>  30
+#[ $OPENMANO_VER_NUM -ge 5061 ] && DB_VERSION=31  #0.5.61 =>  31
+#[ $OPENMANO_VER_NUM -ge 5070 ] && DB_VERSION=32  #0.5.70 =>  32
+#[ $OPENMANO_VER_NUM -ge 5082 ] && DB_VERSION=33  #0.5.82 =>  33
+#[ $OPENMANO_VER_NUM -ge 6000 ] && DB_VERSION=34  #0.6.00 =>  34
+#[ $OPENMANO_VER_NUM -ge 6001 ] && DB_VERSION=35  #0.6.01 =>  35
+#[ $OPENMANO_VER_NUM -ge 6003 ] && DB_VERSION=36  #0.6.03 =>  36
+#[ $OPENMANO_VER_NUM -ge 6009 ] && DB_VERSION=37  #0.6.09 =>  37
+#[ $OPENMANO_VER_NUM -ge 6011 ] && DB_VERSION=38  #0.6.11 =>  38
+#[ $OPENMANO_VER_NUM -ge 6020 ] && DB_VERSION=39  #0.6.20 =>  39
+#TODO ... put next versions here
+
+function upgrade_to_1(){
+    # Migrate schema 0 -> 1: create the schema_version bookkeeping table and
+    # record this step. Relies on helper 'sql' defined elsewhere in the script.
+    # echo "    upgrade database from version 0.0 to version 0.1"
+    echo "      CREATE TABLE \`schema_version\`"
+    sql "CREATE TABLE \`schema_version\` (
+       \`version_int\` INT NOT NULL COMMENT 'version as a number. Must not contain gaps',
+       \`version\` VARCHAR(20) NOT NULL COMMENT 'version as a text',
+       \`openmano_ver\` VARCHAR(20) NOT NULL COMMENT 'openmano version',
+       \`comments\` VARCHAR(2000) NULL COMMENT 'changes to database',
+       \`date\` DATE NULL,
+       PRIMARY KEY (\`version_int\`)
+       )
+       COMMENT='database schema control version'
+       COLLATE='utf8_general_ci'
+       ENGINE=InnoDB;"
+    sql "INSERT INTO \`schema_version\` (\`version_int\`, \`version\`, \`openmano_ver\`, \`comments\`, \`date\`)
+        VALUES (1, '0.1', '0.2.2', 'insert schema_version', '2015-05-08');"
+}
+function downgrade_from_1(){
+    # Revert schema 1 -> 0: drop the schema_version bookkeeping table.
+    # echo "    downgrade database from version 0.1 to version 0.0"
+    echo "      DROP TABLE IF EXISTS \`schema_version\`"
+    sql "DROP TABLE IF EXISTS \`schema_version\`;"
+}
+function upgrade_to_2(){
+    # Migrate schema 1 -> 2: add vim credentials to vim_tenants, split images
+    # and flavors out of the vms table into their own (per-datacenter) tables,
+    # and repoint vms at them via foreign keys.
+    # echo "    upgrade database from version 0.1 to version 0.2"
+    echo "      Add columns user/passwd to table 'vim_tenants'"
+    sql "ALTER TABLE vim_tenants ADD COLUMN user VARCHAR(36) NULL COMMENT 'Credentials for vim' AFTER created,
+       ADD COLUMN passwd VARCHAR(50) NULL COMMENT 'Credentials for vim' AFTER user;"
+    echo "      Add table 'images' and 'datacenters_images'"
+    sql "CREATE TABLE images (
+       uuid VARCHAR(36) NOT NULL,
+       name VARCHAR(50) NOT NULL,
+       location VARCHAR(200) NOT NULL,
+       description VARCHAR(100) NULL,
+       metadata VARCHAR(400) NULL,
+       PRIMARY KEY (uuid),
+       UNIQUE INDEX location (location)  )
+        COLLATE='utf8_general_ci'
+        ENGINE=InnoDB;"
+    sql "CREATE TABLE datacenters_images (
+       id INT NOT NULL AUTO_INCREMENT,
+       image_id VARCHAR(36) NOT NULL,
+       datacenter_id VARCHAR(36) NOT NULL,
+       vim_id VARCHAR(36) NOT NULL,
+       PRIMARY KEY (id),
+       CONSTRAINT FK__images FOREIGN KEY (image_id) REFERENCES images (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
+       CONSTRAINT FK__datacenters_i FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid) ON UPDATE CASCADE ON DELETE CASCADE  )
+        COLLATE='utf8_general_ci'
+        ENGINE=InnoDB;"
+    echo "      migrate data from table 'vms' into 'images'"
+    sql "INSERT INTO images (uuid, name, location) SELECT DISTINCT vim_image_id, vim_image_id, image_path FROM vms;"
+    sql "INSERT INTO datacenters_images (image_id, datacenter_id, vim_id)
+          SELECT DISTINCT vim_image_id, datacenters.uuid, vim_image_id FROM vms JOIN datacenters;"
+    echo "      Add table 'flavors' and 'datacenter_flavors'"
+    sql "CREATE TABLE flavors (
+       uuid VARCHAR(36) NOT NULL,
+       name VARCHAR(50) NOT NULL,
+       description VARCHAR(100) NULL,
+       disk SMALLINT(5) UNSIGNED NULL DEFAULT NULL,
+       ram SMALLINT(5) UNSIGNED NULL DEFAULT NULL,
+       vcpus SMALLINT(5) UNSIGNED NULL DEFAULT NULL,
+       extended VARCHAR(2000) NULL DEFAULT NULL COMMENT 'Extra description json format of needed resources and pining, orginized in sets per numa',
+       PRIMARY KEY (uuid)  )
+        COLLATE='utf8_general_ci'
+        ENGINE=InnoDB;"
+    sql "CREATE TABLE datacenters_flavors (
+       id INT NOT NULL AUTO_INCREMENT,
+       flavor_id VARCHAR(36) NOT NULL,
+       datacenter_id VARCHAR(36) NOT NULL,
+       vim_id VARCHAR(36) NOT NULL,
+       PRIMARY KEY (id),
+       CONSTRAINT FK__flavors FOREIGN KEY (flavor_id) REFERENCES flavors (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
+       CONSTRAINT FK__datacenters_f FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid) ON UPDATE CASCADE ON DELETE CASCADE  )
+        COLLATE='utf8_general_ci'
+        ENGINE=InnoDB;"
+    echo "      migrate data from table 'vms' into 'flavors'"
+    sql "INSERT INTO flavors (uuid, name) SELECT DISTINCT vim_flavor_id, vim_flavor_id FROM vms;"
+    sql "INSERT INTO datacenters_flavors (flavor_id, datacenter_id, vim_id)
+          SELECT DISTINCT vim_flavor_id, datacenters.uuid, vim_flavor_id FROM vms JOIN datacenters;"
+    sql "ALTER TABLE vms ALTER vim_flavor_id DROP DEFAULT, ALTER vim_image_id DROP DEFAULT;
+          ALTER TABLE vms CHANGE COLUMN vim_flavor_id flavor_id VARCHAR(36) NOT NULL COMMENT 'Link to flavor table' AFTER vnf_id,
+          CHANGE COLUMN vim_image_id image_id VARCHAR(36) NOT NULL COMMENT 'Link to image table' AFTER flavor_id, 
+          ADD CONSTRAINT FK_vms_images  FOREIGN KEY (image_id) REFERENCES  images (uuid),
+          ADD CONSTRAINT FK_vms_flavors FOREIGN KEY (flavor_id) REFERENCES flavors (uuid);"
+    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (2, '0.2', '0.2.5', 'new tables images,flavors', '2015-07-13');"
+
+}   
+     
+function downgrade_from_2(){
+    # Revert schema 2 -> 1: fold flavor/image links in vms back to plain
+    # vim_* columns and drop the images/flavors tables.
+    # NOTE(review): the UPDATE that would restore vim ids into vms is
+    # commented out below, so this downgrade loses the VIM id mapping.
+    # echo "    downgrade database from version 0.2 to version 0.1"
+    echo "       migrate back data from 'datacenters_images' 'datacenters_flavors' into 'vms'"
+    sql "ALTER TABLE vms ALTER image_id DROP DEFAULT, ALTER flavor_id DROP DEFAULT;
+          ALTER TABLE vms CHANGE COLUMN flavor_id vim_flavor_id VARCHAR(36) NOT NULL COMMENT 'Flavor ID in the VIM DB' AFTER vnf_id,
+          CHANGE COLUMN image_id vim_image_id VARCHAR(36) NOT NULL COMMENT 'Image ID in the VIM DB' AFTER vim_flavor_id,
+          DROP FOREIGN KEY FK_vms_flavors, DROP INDEX FK_vms_flavors,
+          DROP FOREIGN KEY FK_vms_images, DROP INDEX FK_vms_images;"
+#    echo "UPDATE v SET v.vim_image_id=di.vim_id
+#          FROM  vms as v INNER JOIN images as i ON v.vim_image_id=i.uuid 
+#          INNER JOIN datacenters_images as di ON i.uuid=di.image_id;"
+    echo "      Delete columns 'user/passwd' from 'vim_tenants'"
+    sql "ALTER TABLE vim_tenants DROP COLUMN user, DROP COLUMN passwd; "
+    echo "        delete tables 'datacenter_images', 'images'"
+    sql "DROP TABLE IF EXISTS \`datacenters_images\`;"
+    sql "DROP TABLE IF EXISTS \`images\`;"
+    echo "        delete tables 'datacenter_flavors', 'flavors'"
+    sql "DROP TABLE IF EXISTS \`datacenters_flavors\`;"
+    sql "DROP TABLE IF EXISTS \`flavors\`;"
+    sql "DELETE FROM schema_version WHERE version_int='2';"
+}
+
+function upgrade_to_3(){
+    # echo "    upgrade database from version 0.2 to version 0.3"
+    echo "      Change table 'logs', 'uuids"
+    sql "ALTER TABLE logs CHANGE COLUMN related related VARCHAR(36) NOT NULL COMMENT 'Relevant element for the log' AFTER nfvo_tenant_id;"
+    sql "ALTER TABLE uuids CHANGE COLUMN used_at used_at VARCHAR(36) NULL DEFAULT NULL COMMENT 'Table that uses this UUID' AFTER created_at;"
+    echo "      Add column created to table 'datacenters_images' and 'datacenters_flavors'"
+    for table in datacenters_images datacenters_flavors
+    do
+        sql "ALTER TABLE $table ADD COLUMN created ENUM('true','false') NOT NULL DEFAULT 'false' 
+            COMMENT 'Indicates if it has been created by openmano, or already existed' AFTER vim_id;"
+    done
+    sql "ALTER TABLE images CHANGE COLUMN metadata metadata VARCHAR(2000) NULL DEFAULT NULL AFTER description;"
+    echo "      Allow null to column 'vim_interface_id' in 'instance_interfaces'"
+    sql "ALTER TABLE instance_interfaces CHANGE COLUMN vim_interface_id vim_interface_id VARCHAR(36) NULL DEFAULT NULL COMMENT 'vim identity for that interface' AFTER interface_id; "
+    echo "      Add column config to table 'datacenters'"
+    sql "ALTER TABLE datacenters ADD COLUMN config VARCHAR(4000) NULL DEFAULT NULL COMMENT 'extra config information in json' AFTER vim_url_admin;
+       "
+    echo "      Add column datacenter_id to table 'vim_tenants'"
+    sql "ALTER TABLE vim_tenants ADD COLUMN datacenter_id VARCHAR(36) NULL COMMENT 'Datacenter of this tenant' AFTER uuid,
+       DROP INDEX name, DROP INDEX vim_tenant_id;"
+    sql "ALTER TABLE vim_tenants CHANGE COLUMN name vim_tenant_name VARCHAR(36) NULL DEFAULT NULL COMMENT 'tenant name at VIM' AFTER datacenter_id,
+       CHANGE COLUMN vim_tenant_id vim_tenant_id VARCHAR(36) NULL DEFAULT NULL COMMENT 'Tenant ID at VIM' AFTER vim_tenant_name;"
+    echo "UPDATE vim_tenants as vt LEFT JOIN tenants_datacenters as td ON vt.uuid=td.vim_tenant_id
+       SET vt.datacenter_id=td.datacenter_id;"
+    sql "DELETE FROM vim_tenants WHERE datacenter_id is NULL;"
+    sql "ALTER TABLE vim_tenants ALTER datacenter_id DROP DEFAULT;
+       ALTER TABLE vim_tenants
+       CHANGE COLUMN datacenter_id datacenter_id VARCHAR(36) NOT NULL COMMENT 'Datacenter of this tenant' AFTER uuid;"
+    sql "ALTER TABLE vim_tenants ADD CONSTRAINT FK_vim_tenants_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid)
+       ON UPDATE CASCADE ON DELETE CASCADE;"
+
+    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (3, '0.3', '0.3.3', 'alter vim_tenant tables', '2015-07-28');"
+}
+
+
+# Downgrade schema v0.3 -> v0.2: restore the ENUM typing of logs/uuids
+# reference columns, drop the 'created' flag and datacenters.config, and turn
+# vim_tenants back into a datacenter-independent table with unique name/id.
+function downgrade_from_3(){
+    # echo "    downgrade database from version 0.3 to version 0.2"
+    echo "      Change back table 'logs', 'uuids'"
+    sql "ALTER TABLE logs CHANGE COLUMN related related ENUM('nfvo_tenants','datacenters','vim_tenants','tenants_datacenters','vnfs','vms','interfaces','nets','scenarios','sce_vnfs','sce_interfaces','sce_nets','instance_scenarios','instance_vnfs','instance_vms','instance_nets','instance_interfaces') NOT NULL COMMENT 'Relevant element for the log' AFTER nfvo_tenant_id;"
+    sql "ALTER TABLE uuids CHANGE COLUMN used_at used_at ENUM('nfvo_tenants','datacenters','vim_tenants','vnfs','vms','interfaces','nets','scenarios','sce_vnfs','sce_interfaces','sce_nets','instance_scenarios','instance_vnfs','instance_vms','instance_nets','instance_interfaces') NULL DEFAULT NULL COMMENT 'Table that uses this UUID' AFTER created_at;"
+    echo "      Delete column created from table 'datacenters_images' and 'datacenters_flavors'"
+    for table in datacenters_images datacenters_flavors
+    do
+        sql "ALTER TABLE $table DROP COLUMN created;"
+    done
+    sql "ALTER TABLE images CHANGE COLUMN metadata metadata VARCHAR(400) NULL DEFAULT NULL AFTER description;"
+    echo "      Deny back null to column 'vim_interface_id' in 'instance_interfaces'"
+    sql "ALTER TABLE instance_interfaces CHANGE COLUMN vim_interface_id vim_interface_id VARCHAR(36) NOT NULL COMMENT 'vim identity for that interface' AFTER interface_id; "
+    echo "       Delete column config to table 'datacenters'"
+    sql "ALTER TABLE datacenters DROP COLUMN config;"
+    echo "       Delete column datacenter_id to table 'vim_tenants'"
+    sql "ALTER TABLE vim_tenants DROP COLUMN datacenter_id, DROP FOREIGN KEY FK_vim_tenants_datacenters;"
+    sql "ALTER TABLE vim_tenants CHANGE COLUMN vim_tenant_name name VARCHAR(36) NULL DEFAULT NULL COMMENT '' AFTER uuid"
+    sql "ALTER TABLE vim_tenants ALTER name DROP DEFAULT;"
+    # The '|| ! echo "Warning..."' pattern prints the warning and still returns
+    # non-zero ('!' inverts echo's 0) — presumably so a caller running under
+    # 'set -e' aborts after printing context; TODO confirm the intent.
+    sql "ALTER TABLE vim_tenants CHANGE COLUMN name name VARCHAR(36) NOT NULL AFTER uuid" || ! echo "Warning changing column name at vim_tenants!"
+    sql "ALTER TABLE vim_tenants ADD UNIQUE INDEX name (name);" || ! echo "Warning add unique index name at vim_tenants!"
+    sql "ALTER TABLE vim_tenants ALTER vim_tenant_id DROP DEFAULT;"
+    sql "ALTER TABLE vim_tenants CHANGE COLUMN vim_tenant_id vim_tenant_id VARCHAR(36) NOT NULL COMMENT 'Tenant ID in the VIM DB' AFTER name;" ||
+        ! echo "Warning changing column vim_tenant_id at vim_tenants!"
+    sql "ALTER TABLE vim_tenants ADD UNIQUE INDEX vim_tenant_id (vim_tenant_id);" ||
+        ! echo "Warning add unique index vim_tenant_id at vim_tenants!"
+    sql "DELETE FROM schema_version WHERE version_int='3';"
+}
+
+function upgrade_to_4(){
+    # echo "    upgrade database from version 0.3 to version 0.4"
+    echo "      Enlarge graph field at tables 'sce_vnfs', 'sce_nets'"
+    for table in sce_vnfs sce_nets
+    do
+        sql "ALTER TABLE $table CHANGE COLUMN graph graph VARCHAR(2000) NULL DEFAULT NULL AFTER modified_at;"
+    done
+    sql "ALTER TABLE datacenters CHANGE COLUMN type type VARCHAR(36) NOT NULL DEFAULT 'openvim' AFTER description;"
+    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (4, '0.4', '0.3.5', 'enlarge graph field at sce_vnfs/nets', '2015-10-20');"
+}
+
+# Downgrade schema v0.4 -> v0.3: restore the ENUM type of datacenters.type and
+# (nominally) shrink the 'graph' columns again.
+function downgrade_from_4(){
+    # echo "    downgrade database from version 0.4 to version 0.3"
+    echo "      Shorten back graph field at tables 'sce_vnfs', 'sce_nets'"
+    for table in sce_vnfs sce_nets
+    do
+        # NOTE(review): VARCHAR(2000) is identical to what upgrade_to_4 sets, so
+        # this does not actually shorten anything — confirm the pre-v0.4 width.
+        sql "ALTER TABLE $table CHANGE COLUMN graph graph VARCHAR(2000) NULL DEFAULT NULL AFTER modified_at;"
+    done
+    sql "ALTER TABLE datacenters CHANGE COLUMN type type ENUM('openvim','openstack') NOT NULL DEFAULT 'openvim' AFTER description;"
+    sql "DELETE FROM schema_version WHERE version_int='4';"
+}
+
+# Upgrade schema v0.4 -> v0.5 (openmano 0.4.1): add a MAC address column to
+# 'interfaces' (used for bridge interfaces per the message below).
+function upgrade_to_5(){
+    # echo "    upgrade database from version 0.4 to version 0.5"
+    echo "      Add 'mac' field for bridge interfaces in table 'interfaces'"
+    # CHAR(18) fits a colon-separated 6-byte MAC (17 chars) plus one spare.
+    sql "ALTER TABLE interfaces ADD COLUMN mac CHAR(18) NULL DEFAULT NULL AFTER model;"
+    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (5, '0.5', '0.4.1', 'Add mac address for bridge interfaces', '2015-12-14');"
+}
+# Downgrade schema v0.5 -> v0.4: drop interfaces.mac again.
+function downgrade_from_5(){
+    # echo "    downgrade database from version 0.5 to version 0.4"
+    echo "      Remove 'mac' field for bridge interfaces in table 'interfaces'"
+    sql "ALTER TABLE interfaces DROP COLUMN mac;"
+    sql "DELETE FROM schema_version WHERE version_int='5';"
+}
+
+# Upgrade schema v0.5 -> v0.6 (openmano 0.4.2): keep the original text
+# descriptors on vnfs/scenarios, add VIM status/error columns to the instance
+# tables, and link instance rows back to their scenario/datacenter/tenant.
+function upgrade_to_6(){
+    # echo "    upgrade database from version 0.5 to version 0.6"
+    echo "      Add 'descriptor' field text to 'vnfd', 'scenarios'"
+    sql "ALTER TABLE vnfs ADD COLUMN descriptor TEXT NULL DEFAULT NULL COMMENT 'Original text descriptor used for create the VNF' AFTER class;"
+    sql "ALTER TABLE scenarios ADD COLUMN descriptor TEXT NULL DEFAULT NULL COMMENT 'Original text descriptor used for create the scenario' AFTER modified_at;"
+    # NOTE(review): the message says 'last_error' but the column added below is
+    # 'error_msg' — message only; the schema changes are internally consistent.
+    echo "      Add 'last_error', 'vim_info' to 'instance_vms', 'instance_nets'"
+    sql "ALTER TABLE instance_vms  ADD COLUMN error_msg VARCHAR(1024) NULL DEFAULT NULL AFTER status;"
+    sql "ALTER TABLE instance_vms  ADD COLUMN vim_info TEXT NULL DEFAULT NULL AFTER error_msg;"
+    sql "ALTER TABLE instance_vms  CHANGE COLUMN status status ENUM('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED') NOT NULL DEFAULT 'BUILD' AFTER vim_vm_id;"
+    sql "ALTER TABLE instance_nets ADD COLUMN error_msg VARCHAR(1024) NULL DEFAULT NULL AFTER status;"
+    sql "ALTER TABLE instance_nets ADD COLUMN vim_info TEXT NULL DEFAULT NULL AFTER error_msg;"
+    sql "ALTER TABLE instance_nets CHANGE COLUMN status status ENUM('ACTIVE','DOWN','BUILD','ERROR','VIM_ERROR','INACTIVE','DELETED') NOT NULL DEFAULT 'BUILD' AFTER instance_scenario_id;"
+    echo "      Add 'mac_address', 'ip_address', 'vim_info' to 'instance_interfaces'"
+    sql "ALTER TABLE instance_interfaces ADD COLUMN mac_address VARCHAR(32) NULL DEFAULT NULL AFTER vim_interface_id, ADD COLUMN ip_address VARCHAR(64) NULL DEFAULT NULL AFTER mac_address, ADD COLUMN vim_info TEXT NULL DEFAULT NULL AFTER ip_address;"
+    echo "      Add 'sce_vnf_id','datacenter_id','vim_tenant_id' field to 'instance_vnfs'"
+    sql "ALTER TABLE instance_vnfs ADD COLUMN sce_vnf_id VARCHAR(36) NULL DEFAULT NULL AFTER vnf_id, ADD CONSTRAINT FK_instance_vnfs_sce_vnfs FOREIGN KEY (sce_vnf_id) REFERENCES sce_vnfs (uuid) ON UPDATE CASCADE ON DELETE SET NULL;"
+    sql "ALTER TABLE instance_vnfs ADD COLUMN vim_tenant_id VARCHAR(36) NULL DEFAULT NULL AFTER sce_vnf_id, ADD CONSTRAINT FK_instance_vnfs_vim_tenants FOREIGN KEY (vim_tenant_id) REFERENCES vim_tenants (uuid) ON UPDATE RESTRICT ON DELETE RESTRICT;"
+    sql "ALTER TABLE instance_vnfs ADD COLUMN datacenter_id VARCHAR(36) NULL DEFAULT NULL AFTER vim_tenant_id, ADD CONSTRAINT FK_instance_vnfs_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid) ON UPDATE RESTRICT ON DELETE RESTRICT;"
+    echo "      Add 'sce_net_id','net_id','datacenter_id','vim_tenant_id' field to 'instance_nets'"
+    sql "ALTER TABLE instance_nets ADD COLUMN sce_net_id VARCHAR(36) NULL DEFAULT NULL AFTER instance_scenario_id, ADD CONSTRAINT FK_instance_nets_sce_nets FOREIGN KEY (sce_net_id) REFERENCES sce_nets (uuid) ON UPDATE CASCADE ON DELETE SET NULL;"
+    sql "ALTER TABLE instance_nets ADD COLUMN net_id VARCHAR(36) NULL DEFAULT NULL AFTER sce_net_id, ADD CONSTRAINT FK_instance_nets_nets FOREIGN KEY (net_id) REFERENCES nets (uuid) ON UPDATE CASCADE ON DELETE SET NULL;"
+    sql "ALTER TABLE instance_nets ADD COLUMN vim_tenant_id VARCHAR(36) NULL DEFAULT NULL AFTER net_id, ADD CONSTRAINT FK_instance_nets_vim_tenants FOREIGN KEY (vim_tenant_id) REFERENCES vim_tenants (uuid) ON UPDATE RESTRICT ON DELETE RESTRICT;"
+    sql "ALTER TABLE instance_nets ADD COLUMN datacenter_id VARCHAR(36) NULL DEFAULT NULL AFTER vim_tenant_id, ADD CONSTRAINT FK_instance_nets_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid) ON UPDATE RESTRICT ON DELETE RESTRICT;"
+    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (6, '0.6', '0.4.2', 'Adding VIM status info', '2015-12-22');"
+}
+# Downgrade schema v0.6 -> v0.5: drop the descriptor, VIM status/error and
+# back-reference columns added by upgrade_to_6.
+function downgrade_from_6(){
+    # echo "    downgrade database from version 0.6 to version 0.5"
+    echo "      Remove 'descriptor' field from 'vnfd', 'scenarios' tables"
+    sql "ALTER TABLE vnfs      DROP COLUMN descriptor;"
+    sql "ALTER TABLE scenarios DROP COLUMN descriptor;"
+    echo "      Remove 'last_error', 'vim_info' from 'instance_vms', 'instance_nets'"
+    sql "ALTER TABLE instance_vms  DROP COLUMN error_msg, DROP COLUMN vim_info;"
+    sql "ALTER TABLE instance_vms  CHANGE COLUMN status status ENUM('ACTIVE','PAUSED','INACTIVE','CREATING','ERROR','DELETING') NOT NULL DEFAULT 'CREATING' AFTER vim_vm_id;"
+    sql "ALTER TABLE instance_nets DROP COLUMN error_msg, DROP COLUMN vim_info;"
+    sql "ALTER TABLE instance_nets CHANGE COLUMN status status ENUM('ACTIVE','DOWN','BUILD','ERROR') NOT NULL DEFAULT 'BUILD' AFTER instance_scenario_id;"
+    echo "      Remove 'mac_address', 'ip_address', 'vim_info' from 'instance_interfaces'"
+    sql "ALTER TABLE instance_interfaces DROP COLUMN mac_address, DROP COLUMN ip_address, DROP COLUMN vim_info;"
+    echo "      Remove 'sce_vnf_id','datacenter_id','vim_tenant_id' field from 'instance_vnfs'"
+    sql "ALTER TABLE instance_vnfs DROP COLUMN sce_vnf_id, DROP FOREIGN KEY FK_instance_vnfs_sce_vnfs;"
+    sql "ALTER TABLE instance_vnfs DROP COLUMN vim_tenant_id, DROP FOREIGN KEY FK_instance_vnfs_vim_tenants;"
+    sql "ALTER TABLE instance_vnfs DROP COLUMN datacenter_id, DROP FOREIGN KEY FK_instance_vnfs_datacenters;"
+    echo "      Remove 'sce_net_id','net_id','datacenter_id','vim_tenant_id' field from 'instance_nets'"
+    sql "ALTER TABLE instance_nets DROP COLUMN sce_net_id, DROP FOREIGN KEY FK_instance_nets_sce_nets;"
+    sql "ALTER TABLE instance_nets DROP COLUMN net_id, DROP FOREIGN KEY FK_instance_nets_nets;"
+    sql "ALTER TABLE instance_nets DROP COLUMN vim_tenant_id, DROP FOREIGN KEY FK_instance_nets_vim_tenants;"
+    sql "ALTER TABLE instance_nets DROP COLUMN datacenter_id, DROP FOREIGN KEY FK_instance_nets_datacenters;"
+    sql "DELETE FROM schema_version WHERE version_int='6';"
+}
+
+function upgrade_to_7(){
+    # echo "    upgrade database from version 0.6 to version 0.7"
+    echo "      Change created_at, modified_at from timestamp to unix float at all database"
+    for table in datacenters datacenter_nets instance_nets instance_scenarios instance_vms instance_vnfs interfaces nets nfvo_tenants scenarios sce_interfaces sce_nets sce_vnfs tenants_datacenters vim_tenants vms vnfs uuids
+    do
+         echo -en "        $table               \r"
+         sql "ALTER TABLE $table ADD COLUMN created_at_ DOUBLE NOT NULL after created_at;"
+         echo "UPDATE $table SET created_at_=unix_timestamp(created_at);"
+         sql "ALTER TABLE $table DROP COLUMN created_at, CHANGE COLUMN created_at_ created_at DOUBLE NOT NULL;"
+         [[ $table == uuids ]] || sql "ALTER TABLE $table CHANGE COLUMN modified_at modified_at DOUBLE NULL DEFAULT NULL;"
+    done
+    
+    echo "      Add 'descriptor' field text to 'vnfd', 'scenarios'"
+    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (7, '0.7', '0.4.3', 'Changing created_at time at database', '2016-01-25');"
+}
+function downgrade_from_7(){
+    # echo "    downgrade database from version 0.7 to version 0.6"
+    echo "      Change back created_at, modified_at from unix float to timestamp at all database"
+    for table in datacenters datacenter_nets instance_nets instance_scenarios instance_vms instance_vnfs interfaces nets nfvo_tenants scenarios sce_interfaces sce_nets sce_vnfs tenants_datacenters vim_tenants vms vnfs uuids
+    do
+         echo -en "        $table               \r"
+         sql "ALTER TABLE $table ADD COLUMN created_at_ TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP after created_at;"
+         echo "UPDATE $table SET created_at_=from_unixtime(created_at);"
+         sql "ALTER TABLE $table DROP COLUMN created_at, CHANGE COLUMN created_at_ created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP;"
+         [[ $table == uuids ]] || sql "ALTER TABLE $table CHANGE COLUMN modified_at modified_at TIMESTAMP NULL DEFAULT NULL;"
+    done
+    echo "      Remove 'descriptor' field from 'vnfd', 'scenarios' tables"
+    sql "DELETE FROM schema_version WHERE version_int='7';"
+}
+
+function upgrade_to_8(){
+    # echo "    upgrade database from version 0.7 to version 0.8"
+    echo "      Change enalarge name, description to 255 at all database"
+    for table in datacenters datacenter_nets flavors images instance_scenarios nets nfvo_tenants scenarios sce_nets sce_vnfs vms vnfs
+    do
+         echo -en "        $table               \r"
+         sql "ALTER TABLE $table CHANGE COLUMN name name VARCHAR(255) NOT NULL;"
+         sql "ALTER TABLE $table CHANGE COLUMN description description VARCHAR(255) NULL DEFAULT NULL;"
+    done
+    echo -en "        interfaces           \r"
+    sql "ALTER TABLE interfaces CHANGE COLUMN internal_name internal_name VARCHAR(255) NOT NULL, CHANGE COLUMN external_name external_name VARCHAR(255) NULL DEFAULT NULL;"
+    sql "ALTER TABLE vim_tenants CHANGE COLUMN vim_tenant_name vim_tenant_name VARCHAR(64) NULL DEFAULT NULL;"
+    echo -en "        vim_tenants          \r"
+    sql "ALTER TABLE vim_tenants CHANGE COLUMN user user VARCHAR(64) NULL DEFAULT NULL, CHANGE COLUMN passwd passwd VARCHAR(64) NULL DEFAULT NULL;"
+    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (8, '0.8', '0.4.32', 'Enlarging name at database', '2016-02-01');"
+}
+function downgrade_from_8(){
+    # echo "    downgrade database from version 0.8 to version 0.7"
+    echo "      Change back name,description to shorter length at all database"
+    for table in datacenters datacenter_nets flavors images instance_scenarios nets nfvo_tenants scenarios sce_nets sce_vnfs vms vnfs
+    do
+         name_length=50
+         [[ $table == flavors ]] || [[ $table == images ]] || name_length=36 
+         echo -en "        $table               \r"
+         sql "ALTER TABLE $table CHANGE COLUMN name name VARCHAR($name_length) NOT NULL;"
+         sql "ALTER TABLE $table CHANGE COLUMN description description VARCHAR(100) NULL DEFAULT NULL;"
+    done
+    echo -en "        interfaces           \r"
+    sql "ALTER TABLE interfaces CHANGE COLUMN internal_name internal_name VARCHAR(25) NOT NULL, CHANGE COLUMN external_name external_name VARCHAR(25) NULL DEFAULT NULL;"
+    echo -en "        vim_tenants          \r"
+    sql "ALTER TABLE vim_tenants CHANGE COLUMN vim_tenant_name vim_tenant_name VARCHAR(36) NULL DEFAULT NULL;"
+    sql "ALTER TABLE vim_tenants CHANGE COLUMN user user VARCHAR(36) NULL DEFAULT NULL, CHANGE COLUMN passwd passwd VARCHAR(50) NULL DEFAULT NULL;"
+    sql "DELETE FROM schema_version WHERE version_int='8';"
+}
+# Upgrade schema v0.8 -> v0.9 (openmano 0.4.33): add the 'ACTIVE:NoMgmtIP'
+# state to instance_vms.status.
+function upgrade_to_9(){
+    # echo "    upgrade database from version 0.8 to version 0.9"
+    echo "      Add more status to 'instance_vms'"
+    sql "ALTER TABLE instance_vms CHANGE COLUMN status status ENUM('ACTIVE:NoMgmtIP','ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED') NOT NULL DEFAULT 'BUILD';"
+    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (9, '0.9', '0.4.33', 'Add ACTIVE:NoMgmtIP to instance_vms table', '2016-02-05');"
+}
+function downgrade_from_9(){
+    # echo "    downgrade database from version 0.9 to version 0.8"
+    echo "      Add more status to 'instance_vms'"
+    sql "ALTER TABLE instance_vms CHANGE COLUMN status status ENUM('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED') NOT NULL DEFAULT 'BUILD';"
+    sql "DELETE FROM schema_version WHERE version_int='9';"
+}
+function upgrade_to_10(){
+    # echo "    upgrade database from version 0.9 to version 0.10"
+    echo "      add tenant to 'vnfs'"
+    sql "ALTER TABLE vnfs ADD COLUMN tenant_id VARCHAR(36) NULL DEFAULT NULL AFTER name, ADD CONSTRAINT FK_vnfs_nfvo_tenants FOREIGN KEY (tenant_id) REFERENCES nfvo_tenants (uuid) ON UPDATE CASCADE ON DELETE SET NULL, CHANGE COLUMN public public ENUM('true','false') NOT NULL DEFAULT 'false' AFTER physical, DROP INDEX name, DROP INDEX path, DROP COLUMN path;"
+    sql "ALTER TABLE scenarios DROP FOREIGN KEY FK_scenarios_nfvo_tenants;"
+    sql "ALTER TABLE scenarios CHANGE COLUMN nfvo_tenant_id tenant_id VARCHAR(36) NULL DEFAULT NULL after name, ADD CONSTRAINT FK_scenarios_nfvo_tenants FOREIGN KEY (tenant_id) REFERENCES nfvo_tenants (uuid);"
+    sql "ALTER TABLE instance_scenarios DROP FOREIGN KEY FK_instance_scenarios_nfvo_tenants;"
+    sql "ALTER TABLE instance_scenarios CHANGE COLUMN nfvo_tenant_id tenant_id VARCHAR(36) NULL DEFAULT NULL after name, ADD CONSTRAINT FK_instance_scenarios_nfvo_tenants FOREIGN KEY (tenant_id) REFERENCES nfvo_tenants (uuid);"
+    echo "      rename 'vim_tenants' table to 'datacenter_tenants'"
+    echo "RENAME TABLE vim_tenants TO datacenter_tenants;"
+    for table in tenants_datacenters instance_scenarios instance_vnfs instance_nets
+    do
+        NULL="NOT NULL"
+        [[ $table == instance_vnfs ]] && NULL="NULL DEFAULT NULL"
+        sql "ALTER TABLE ${table} DROP FOREIGN KEY FK_${table}_vim_tenants;"
+        sql "ALTER TABLE ${table} ALTER vim_tenant_id DROP DEFAULT;"
+        sql "ALTER TABLE ${table} CHANGE COLUMN vim_tenant_id datacenter_tenant_id VARCHAR(36)  ${NULL} AFTER datacenter_id, ADD CONSTRAINT FK_${table}_datacenter_tenants FOREIGN KEY (datacenter_tenant_id) REFERENCES datacenter_tenants (uuid); "
+    done    
+    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (10, '0.10', '0.4.36', 'tenant management of vnfs,scenarios', '2016-03-08');"
+}
+
+function downgrade_from_10(){
+    # echo "    downgrade database from version 0.10 to version 0.9"
+    echo "      remove tenant from 'vnfs'"
+    sql "ALTER TABLE vnfs DROP COLUMN tenant_id, DROP FOREIGN KEY FK_vnfs_nfvo_tenants, ADD UNIQUE INDEX name (name), ADD COLUMN path VARCHAR(100) NULL DEFAULT NULL COMMENT 'Path where the YAML descriptor of the VNF can be found. NULL if it is a physical network function.' AFTER name, ADD UNIQUE INDEX path (path), CHANGE COLUMN public public ENUM('true','false') NOT NULL DEFAULT 'true' AFTER physical;"
+    sql "ALTER TABLE scenarios DROP FOREIGN KEY FK_scenarios_nfvo_tenants;"
+    sql "ALTER TABLE scenarios CHANGE COLUMN tenant_id nfvo_tenant_id VARCHAR(36) NULL DEFAULT NULL after name, ADD CONSTRAINT FK_scenarios_nfvo_tenants FOREIGN KEY (nfvo_tenant_id) REFERENCES nfvo_tenants (uuid);"
+    sql "ALTER TABLE instance_scenarios DROP FOREIGN KEY FK_instance_scenarios_nfvo_tenants;"
+    sql "ALTER TABLE instance_scenarios CHANGE COLUMN tenant_id nfvo_tenant_id VARCHAR(36) NULL DEFAULT NULL after name, ADD CONSTRAINT FK_instance_scenarios_nfvo_tenants FOREIGN KEY (nfvo_tenant_id) REFERENCES nfvo_tenants (uuid);"
+    echo "      rename back 'datacenter_tenants' table to 'vim_tenants'"
+    echo "RENAME TABLE datacenter_tenants TO vim_tenants;"
+    for table in tenants_datacenters instance_scenarios instance_vnfs instance_nets
+    do
+        sql "ALTER TABLE ${table} DROP FOREIGN KEY FK_${table}_datacenter_tenants;"
+        NULL="NOT NULL"
+        [[ $table == instance_vnfs ]] && NULL="NULL DEFAULT NULL"
+        sql "ALTER TABLE ${table} ALTER datacenter_tenant_id DROP DEFAULT;"
+        sql "ALTER TABLE ${table} CHANGE COLUMN datacenter_tenant_id vim_tenant_id VARCHAR(36) $NULL AFTER datacenter_id, ADD CONSTRAINT FK_${table}_vim_tenants FOREIGN KEY (vim_tenant_id) REFERENCES vim_tenants (uuid); "
+    done    
+    sql "DELETE FROM schema_version WHERE version_int='10';"
+}
+
+# Upgrade schema v0.10 -> v0.11 (openmano 0.4.43): drop the unique name
+# constraint so scenarios and instances may share names.
+function upgrade_to_11(){
+    # echo "    upgrade database from version 0.10 to version 0.11"
+    echo "      remove unique name at 'scenarios', 'instance_scenarios'"
+    sql "ALTER TABLE scenarios DROP INDEX name;"
+    sql "ALTER TABLE instance_scenarios DROP INDEX name;"
+    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (11, '0.11', '0.4.43', 'remove unique name at scenarios,instance_scenarios', '2016-07-18');"
+}
+# Downgrade schema v0.11 -> v0.10: re-add the unique name indexes (fails if
+# duplicate names were created meanwhile).
+function downgrade_from_11(){
+    # echo "    downgrade database from version 0.11 to version 0.10"
+    echo "      add unique name at 'scenarios', 'instance_scenarios'"
+    sql "ALTER TABLE scenarios ADD UNIQUE INDEX name (name);"
+    sql "ALTER TABLE instance_scenarios ADD UNIQUE INDEX name (name);"
+    sql "DELETE FROM schema_version WHERE version_int='11';"
+}
+
+# Upgrade schema v0.11 -> v0.12 (openmano 0.4.46): create the 'ip_profiles'
+# table (one row can reference a net, a sce_net or an instance_net, each with
+# ON DELETE CASCADE) and add ip_address to 'interfaces' and 'sce_interfaces'.
+function upgrade_to_12(){
+    # echo "    upgrade database from version 0.11 to version 0.12"
+    echo "      create ip_profiles table, with foreign keys to all nets tables, and add ip_address column to 'interfaces' and 'sce_interfaces'"
+    sql "CREATE TABLE IF NOT EXISTS ip_profiles (
+       id INT(11) NOT NULL AUTO_INCREMENT,
+       net_id VARCHAR(36) NULL DEFAULT NULL,
+       sce_net_id VARCHAR(36) NULL DEFAULT NULL,
+       instance_net_id VARCHAR(36) NULL DEFAULT NULL,
+       ip_version ENUM('IPv4','IPv6') NOT NULL DEFAULT 'IPv4',
+       subnet_address VARCHAR(64) NULL DEFAULT NULL,
+       gateway_address VARCHAR(64) NULL DEFAULT NULL,
+       dns_address VARCHAR(64) NULL DEFAULT NULL,
+       dhcp_enabled ENUM('true','false') NOT NULL DEFAULT 'true',
+       dhcp_start_address VARCHAR(64) NULL DEFAULT NULL,
+       dhcp_count INT(11) NULL DEFAULT NULL,
+       PRIMARY KEY (id),
+       CONSTRAINT FK_ipprofiles_nets FOREIGN KEY (net_id) REFERENCES nets (uuid) ON DELETE CASCADE,
+       CONSTRAINT FK_ipprofiles_scenets FOREIGN KEY (sce_net_id) REFERENCES sce_nets (uuid) ON DELETE CASCADE,
+       CONSTRAINT FK_ipprofiles_instancenets FOREIGN KEY (instance_net_id) REFERENCES instance_nets (uuid) ON DELETE CASCADE  )
+        COMMENT='Table containing the IP parameters of a network, either a net, a sce_net or and instance_net.'
+        COLLATE='utf8_general_ci'
+        ENGINE=InnoDB;"
+    sql "ALTER TABLE interfaces ADD COLUMN ip_address VARCHAR(64) NULL DEFAULT NULL AFTER mac;"
+    sql "ALTER TABLE sce_interfaces ADD COLUMN ip_address VARCHAR(64) NULL DEFAULT NULL AFTER interface_id;"
+    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (12, '0.12', '0.4.46', 'create ip_profiles table, with foreign keys to all nets tables, and add ip_address column to interfaces and sce_interfaces', '2016-08-29');"
+}
+# Downgrade schema v0.12 -> v0.11: drop ip_profiles and the ip_address columns.
+function downgrade_from_12(){
+    # echo "    downgrade database from version 0.12 to version 0.11"
+    echo "      delete ip_profiles table, and remove ip_address column in 'interfaces' and 'sce_interfaces'"
+    sql "DROP TABLE IF EXISTS ip_profiles;"
+    sql "ALTER TABLE interfaces DROP COLUMN ip_address;"
+    sql "ALTER TABLE sce_interfaces DROP COLUMN ip_address;"
+    sql "DELETE FROM schema_version WHERE version_int='12';"
+}
+
+# Upgrade schema v0.12 -> v0.13 (openmano 0.4.47): store cloud-config text on
+# scenarios and instance_scenarios.
+function upgrade_to_13(){
+    # echo "    upgrade database from version 0.12 to version 0.13"
+    echo "      add cloud_config at 'scenarios', 'instance_scenarios'"
+    sql "ALTER TABLE scenarios ADD COLUMN cloud_config MEDIUMTEXT NULL DEFAULT NULL AFTER descriptor;"
+    sql "ALTER TABLE instance_scenarios ADD COLUMN cloud_config MEDIUMTEXT NULL DEFAULT NULL AFTER modified_at;"
+    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (13, '0.13', '0.4.47', 'insert cloud-config at scenarios,instance_scenarios', '2016-08-30');"
+}
+# Downgrade schema v0.13 -> v0.12: drop the cloud_config columns again.
+function downgrade_from_13(){
+    # echo "    downgrade database from version 0.13 to version 0.12"
+    echo "      remove cloud_config at 'scenarios', 'instance_scenarios'"
+    sql "ALTER TABLE scenarios DROP COLUMN cloud_config;"
+    sql "ALTER TABLE instance_scenarios DROP COLUMN cloud_config;"
+    sql "DELETE FROM schema_version WHERE version_int='13';"
+}
+
+# Upgrade schema v0.13 -> v0.14 (openmano 0.4.57): drop the unique
+# (vim_net_id, instance_scenario_id) index and rename instance_nets.external
+# to 'created' (with inverted meaning, per the column comments).
+function upgrade_to_14(){
+    # echo "    upgrade database from version 0.13 to version 0.14"
+    echo "      remove unique index vim_net_id, instance_scenario_id at table 'instance_nets'"
+    sql "ALTER TABLE instance_nets DROP INDEX vim_net_id_instance_scenario_id;"
+    sql "ALTER TABLE instance_nets CHANGE COLUMN external created ENUM('true','false') NOT NULL DEFAULT 'false' COMMENT 'Created or already exists at VIM' AFTER multipoint;"
+    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (14, '0.14', '0.4.57', 'remove unique index vim_net_id, instance_scenario_id', '2016-09-26');"
+}
+function downgrade_from_14(){
+    # echo "    downgrade database from version 0.14 to version 0.13"
+    echo "      remove cloud_config at 'scenarios', 'instance_scenarios'"
+    sql "ALTER TABLE instance_nets ADD UNIQUE INDEX vim_net_id_instance_scenario_id (vim_net_id, instance_scenario_id);"
+    sql "ALTER TABLE instance_nets CHANGE COLUMN created external ENUM('true','false') NOT NULL DEFAULT 'false' COMMENT 'If external, means that it already exists at VIM' AFTER multipoint;"
+    sql "DELETE FROM schema_version WHERE version_int='14';"
+}
+
+# Upgrade schema v0.14 -> v0.15 (openmano 0.4.59): add universal_name/checksum
+# to 'images' (unique together), relax images.location and vms.image_path to
+# allow NULL.
+function upgrade_to_15(){
+    # echo "    upgrade database from version 0.14 to version 0.15"
+    echo "      add columns 'universal_name' and 'checksum' at table 'images', add unique index universal_name_checksum, and change location to allow NULL; change column 'image_path' in table 'vms' to allow NULL"
+    sql "ALTER TABLE images ADD COLUMN checksum VARCHAR(32) NULL DEFAULT NULL AFTER name;"
+    # DROP DEFAULT first so the CHANGE COLUMN below starts from a clean state.
+    sql "ALTER TABLE images ALTER location DROP DEFAULT;"
+    sql "ALTER TABLE images ADD COLUMN universal_name VARCHAR(255) NULL AFTER name, CHANGE COLUMN location location VARCHAR(200) NULL AFTER checksum, ADD UNIQUE INDEX universal_name_checksum (universal_name, checksum);"
+    sql "ALTER TABLE vms ALTER image_path DROP DEFAULT;"
+    sql "ALTER TABLE vms CHANGE COLUMN image_path image_path VARCHAR(100) NULL COMMENT 'Path where the image of the VM is located' AFTER image_id;"
+    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (15, '0.15', '0.4.59', 'add columns universal_name and checksum at table images, add unique index universal_name_checksum, and change location to allow NULL; change column image_path in table vms to allow NULL', '2016-09-27');"
+}
+# Downgrade schema v0.15 -> v0.14: drop universal_name/checksum and make
+# images.location and vms.image_path NOT NULL again (fails on NULL data).
+function downgrade_from_15(){
+    # echo "    downgrade database from version 0.15 to version 0.14"
+    echo "      remove columns 'universal_name' and 'checksum' from table 'images', remove index universal_name_checksum, change location NOT NULL; change column 'image_path' in table 'vms' to NOT NULL"
+    sql "ALTER TABLE images DROP INDEX universal_name_checksum;"
+    sql "ALTER TABLE images ALTER location DROP DEFAULT;"
+    sql "ALTER TABLE images CHANGE COLUMN location location VARCHAR(200) NOT NULL AFTER checksum;"
+    sql "ALTER TABLE images DROP COLUMN universal_name;"
+    sql "ALTER TABLE images DROP COLUMN checksum;"
+    sql "ALTER TABLE vms ALTER image_path DROP DEFAULT;"
+    sql "ALTER TABLE vms CHANGE COLUMN image_path image_path VARCHAR(100) NOT NULL COMMENT 'Path where the image of the VM is located' AFTER image_id;"
+    sql "DELETE FROM schema_version WHERE version_int='15';"
+}
+
+# Upgrade schema v0.15 -> v0.16 (openmano 0.5.2): add datacenter_tenants.config
+# and enlarge vim_tenant_name/vim_tenant_id to 256 chars.
+function upgrade_to_16(){
+    # echo "    upgrade database from version 0.15 to version 0.16"
+    echo "      add column 'config' at table 'datacenter_tenants', enlarge 'vim_tenant_name/id'"
+    sql "ALTER TABLE datacenter_tenants ADD COLUMN config VARCHAR(4000) NULL DEFAULT NULL AFTER passwd;"
+    sql "ALTER TABLE datacenter_tenants CHANGE COLUMN vim_tenant_name vim_tenant_name VARCHAR(256) NULL DEFAULT NULL AFTER datacenter_id;"
+    sql "ALTER TABLE datacenter_tenants CHANGE COLUMN vim_tenant_id vim_tenant_id VARCHAR(256) NULL DEFAULT NULL COMMENT 'Tenant ID at VIM' AFTER vim_tenant_name;"
+    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (16, '0.16', '0.5.2', 'enlarge vim_tenant_name and id. New config at datacenter_tenants', '2016-10-11');"
+}
+function downgrade_from_16(){
+    # echo "    downgrade database from version 0.16 to version 0.15"
+    echo "      remove column 'config' at table 'datacenter_tenants', restoring lenght 'vim_tenant_name/id'"
+    sql "ALTER TABLE datacenter_tenants DROP COLUMN config;"
+    sql "ALTER TABLE datacenter_tenants CHANGE COLUMN vim_tenant_name vim_tenant_name VARCHAR(64) NULL DEFAULT NULL AFTER datacenter_id;"
+    sql "ALTER TABLE datacenter_tenants CHANGE COLUMN vim_tenant_id vim_tenant_id VARCHAR(36) NULL DEFAULT NULL COMMENT 'Tenant ID at VIM' AFTER vim_tenant_name;"
+    sql "DELETE FROM schema_version WHERE version_int='16';"
+}
+
+# Upgrade schema v0.16 -> v0.17 (openmano 0.5.3): add an 'extended' json column
+# to datacenters_flavors for extra-device descriptions.
+function upgrade_to_17(){
+    # echo "    upgrade database from version 0.16 to version 0.17"
+    # NOTE(review): the message says 'datacenter_flavors' but the actual table
+    # name (as in the statement below) is 'datacenters_flavors'.
+    echo "      add column 'extended' at table 'datacenter_flavors'"
+    sql "ALTER TABLE datacenters_flavors ADD extended varchar(2000) NULL COMMENT 'Extra description json format of additional devices';"
+    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (17, '0.17', '0.5.3', 'Extra description json format of additional devices in datacenter_flavors', '2016-12-20');"
+}
+# Downgrade schema v0.17 -> v0.16: drop datacenters_flavors.extended again.
+function downgrade_from_17(){
+    # echo "    downgrade database from version 0.17 to version 0.16"
+    echo "      remove column 'extended' from table 'datacenter_flavors'"
+    sql "ALTER TABLE datacenters_flavors DROP COLUMN extended;"
+    sql "DELETE FROM schema_version WHERE version_int='17';"
+}
+
+function upgrade_to_18(){
+    # Schema v17 -> v18: add floating_ip (default off) and port_security
+    # (default on) flags to both the template table 'interfaces' and the
+    # runtime table 'instance_interfaces'.
+    # echo "    upgrade database from version 0.17 to version 0.18"
+    echo "      add columns 'floating_ip' and 'port_security' at tables 'interfaces' and 'instance_interfaces'"
+    sql "ALTER TABLE interfaces ADD floating_ip BOOL DEFAULT 0 NOT NULL COMMENT 'Indicates if a floating_ip must be associated to this interface';"
+    sql "ALTER TABLE interfaces ADD port_security BOOL DEFAULT 1 NOT NULL COMMENT 'Indicates if port security must be enabled or disabled. By default it is enabled';"
+    sql "ALTER TABLE instance_interfaces ADD floating_ip BOOL DEFAULT 0 NOT NULL COMMENT 'Indicates if a floating_ip must be associated to this interface';"
+    sql "ALTER TABLE instance_interfaces ADD port_security BOOL DEFAULT 1 NOT NULL COMMENT 'Indicates if port security must be enabled or disabled. By default it is enabled';"
+    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (18, '0.18', '0.5.4', 'Add columns \'floating_ip\' and \'port_security\' at tables \'interfaces\' and \'instance_interfaces\'', '2017-01-09');"
+}
+function downgrade_from_18(){
+    # Revert schema v18 -> v17: drop the floating_ip/port_security flags from
+    # 'interfaces' and 'instance_interfaces'.
+    # echo "    downgrade database from version 0.18 to version 0.17"
+    echo "      remove columns 'floating_ip' and 'port_security' from tables 'interfaces' and 'instance_interfaces'"
+    sql "ALTER TABLE interfaces DROP COLUMN floating_ip;"
+    sql "ALTER TABLE interfaces DROP COLUMN port_security;"
+    sql "ALTER TABLE instance_interfaces DROP COLUMN floating_ip;"
+    sql "ALTER TABLE instance_interfaces DROP COLUMN port_security;"
+    sql "DELETE FROM schema_version WHERE version_int='18';"
+}
+
+function upgrade_to_19(){
+    # Schema v18 -> v19: add vms.boot_data, free-form text holding extra
+    # boot-data content for a VNFC.
+    # echo "    upgrade database from version 0.18 to version 0.19"
+    echo "      add column 'boot_data' at table 'vms'"
+    sql "ALTER TABLE vms ADD COLUMN boot_data TEXT NULL DEFAULT NULL AFTER image_path;"
+    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (19, '0.19', '0.5.5', 'Extra Boot-data content at VNFC (vms)', '2017-01-11');"
+}
+function downgrade_from_19(){
+    # Revert schema v19 -> v18: drop vms.boot_data.
+    # echo "    downgrade database from version 0.19 to version 0.18"
+    echo "      remove column 'boot_data' from table 'vms'"
+    sql "ALTER TABLE vms DROP COLUMN boot_data;"
+    sql "DELETE FROM schema_version WHERE version_int='19';"
+}
+
+function upgrade_to_20(){
+    # Schema v19 -> v20: store SDN dataplane connectivity info coming from
+    # ovim: the SDN network id on instance_nets, and the SDN port id plus its
+    # physical location (compute node, PCI address, VLAN) on
+    # instance_interfaces.
+    # echo "    upgrade database from version 0.19 to version 0.20"
+    echo "      add column 'sdn_net_id' at table 'instance_nets' and columns 'sdn_port_id', 'compute_node', 'pci' and 'vlan' to table 'instance_interfaces'"
+    sql "ALTER TABLE instance_nets ADD sdn_net_id varchar(36) DEFAULT NULL NULL COMMENT 'Network id in ovim';"
+    sql "ALTER TABLE instance_interfaces ADD sdn_port_id varchar(36) DEFAULT NULL NULL COMMENT 'Port id in ovim';"
+    sql "ALTER TABLE instance_interfaces ADD compute_node varchar(100) DEFAULT NULL NULL COMMENT 'Compute node id used to specify the SDN port mapping';"
+    sql "ALTER TABLE instance_interfaces ADD pci varchar(12) DEFAULT NULL NULL COMMENT 'PCI of the physical port in the host';"
+    sql "ALTER TABLE instance_interfaces ADD vlan SMALLINT UNSIGNED DEFAULT NULL NULL COMMENT 'VLAN tag used by the port';"
+    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (20, '0.20', '0.5.9', 'Added columns to store dataplane connectivity info', '2017-03-13');"
+}
+function downgrade_from_20(){
+    # Revert schema v20 -> v19: drop the SDN connectivity columns.
+    # Columns of instance_interfaces are dropped in reverse order of creation.
+    # echo "    downgrade database from version 0.20 to version 0.19"
+    echo "      remove column 'sdn_net_id' at table 'instance_nets' and columns 'sdn_port_id', 'compute_node', 'pci' and 'vlan' to table 'instance_interfaces'"
+    sql "ALTER TABLE instance_nets DROP COLUMN sdn_net_id;"
+    sql "ALTER TABLE instance_interfaces DROP COLUMN vlan;"
+    sql "ALTER TABLE instance_interfaces DROP COLUMN pci;"
+    sql "ALTER TABLE instance_interfaces DROP COLUMN compute_node;"
+    sql "ALTER TABLE instance_interfaces DROP COLUMN sdn_port_id;"
+    sql "DELETE FROM schema_version WHERE version_int='20';"
+}
+
+function upgrade_to_21(){
+    # Schema v20 -> v21: make instance_nets.instance_scenario_id nullable
+    # (nets no longer must belong to an instance scenario) and enlarge
+    # ip_profiles.dns_address to hold a semicolon-separated list of DNS IPs.
+    # echo "    upgrade database from version 0.20 to version 0.21"
+    echo "      edit 'instance_nets' to allow instance_scenario_id=None"
+    sql "ALTER TABLE instance_nets MODIFY COLUMN instance_scenario_id varchar(36) NULL;"
+    echo "      enlarge column 'dns_address' at table 'ip_profiles'"
+    sql "ALTER TABLE ip_profiles MODIFY dns_address varchar(255) DEFAULT NULL NULL "\
+         "comment 'dns ip list separated by semicolon';"
+    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (21, '0.21', '0.5.15', 'Edit instance_nets to allow instance_scenario_id=None and enlarge column dns_address at table ip_profiles', '2017-06-02');"
+}
+function downgrade_from_21(){
+    # Revert schema v21 -> v20. Destructive: rows of instance_nets with a
+    # NULL instance_scenario_id cannot be represented in v20 and are deleted
+    # before the column is made NOT NULL again.
+    # echo "    downgrade database from version 0.21 to version 0.20"
+    echo "      edit 'instance_nets' to disallow instance_scenario_id=None"
+    #Delete all lines with a instance_scenario_id=NULL in order to disable this option
+    sql "DELETE FROM instance_nets WHERE instance_scenario_id IS NULL;"
+    sql "ALTER TABLE instance_nets MODIFY COLUMN instance_scenario_id varchar(36) NOT NULL;"
+    echo "      shorten column 'dns_address' at table 'ip_profiles'"
+    sql "ALTER TABLE ip_profiles MODIFY dns_address varchar(64) DEFAULT NULL NULL;"
+    sql "DELETE FROM schema_version WHERE version_int='21';"
+}
+
+function upgrade_to_22(){
+    # Schema v21 -> v22: widen flavors.ram from SMALLINT to MEDIUMINT so
+    # flavors can declare more RAM than a SMALLINT can hold.
+    # echo "    upgrade database from version 0.21 to version 0.22"
+    echo "      Changed type of ram in 'flavors' from SMALLINT to MEDIUMINT"
+    sql "ALTER TABLE flavors CHANGE COLUMN ram ram MEDIUMINT(7) UNSIGNED NULL DEFAULT NULL AFTER disk;"
+    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (22, '0.22', '0.5.16', 'Changed type of ram in flavors from SMALLINT to MEDIUMINT', '2017-06-02');"
+}
+function downgrade_from_22(){
+    # Revert schema v22 -> v21: narrow flavors.ram back to SMALLINT.
+    # NOTE(review): existing ram values above the SMALLINT UNSIGNED range will
+    # be truncated or rejected depending on the server's sql_mode — confirm
+    # this is acceptable before downgrading a populated database.
+    # echo "    downgrade database from version 0.22 to version 0.21"
+    echo "      Changed type of ram in 'flavors' from MEDIUMINT to SMALLINT"
+    sql "ALTER TABLE flavors CHANGE COLUMN ram ram SMALLINT(5) UNSIGNED NULL DEFAULT NULL AFTER disk;"
+    sql "DELETE FROM schema_version WHERE version_int='22';"
+}
+
+function upgrade_to_23(){
+    # echo "    upgrade database from version 0.22 to version 0.23"
+    echo "      add column 'availability_zone' at table 'vms'"
+    sql "ALTER TABLE vms ADD COLUMN availability_zone VARCHAR(255) NULL AFTER modified_at;"
+    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (23, '0.23', '0.5.20',"\
+        "'Changed type of ram in flavors from SMALLINT to MEDIUMINT', '2017-08-29');"
+}
+function downgrade_from_23(){
+    # Revert schema v23 -> v22: drop vms.availability_zone.
+    # echo "    downgrade database from version 0.23 to version 0.22"
+    echo "      remove column 'availability_zone' from table 'vms'"
+    sql "ALTER TABLE vms DROP COLUMN availability_zone;"
+    sql "DELETE FROM schema_version WHERE version_int='23';"
+}
+
+function upgrade_to_24(){
+    # Schema v23 -> v24: add vms.count (defaults to 1).
+    # Presumably the number of instances of this VM per VNF — confirm with
+    # the callers that read it.
+    # echo "    upgrade database from version 0.23 to version 0.24"
+    echo "      Add 'count' to table 'vms'"
+
+    sql "ALTER TABLE vms ADD COLUMN count SMALLINT NOT NULL DEFAULT '1' AFTER vnf_id;"
+    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
+         "VALUES (24, '0.24', '0.5.21', 'Added vnfd fields', '2017-08-29');"
+}
+function downgrade_from_24(){
+    # Revert schema v24 -> v23: drop vms.count.
+    # echo "    downgrade database from version 0.24 to version 0.23"
+    echo "      Remove 'count' from table 'vms'"
+    sql "ALTER TABLE vms DROP COLUMN count;"
+    sql "DELETE FROM schema_version WHERE version_int='24';"
+}
+function upgrade_to_25(){
+    # Schema v24 -> v25: add OSM descriptor fields. Both vnfs and scenarios
+    # get osm_id (unique per tenant), short_name and vendor; vnfs also gets
+    # mgmt_access, vms gets osm_id, sce_vnfs gets member_vnf_index, and
+    # ip_profiles gets security_group.
+    # echo "    upgrade database from version 0.24 to version 0.25"
+    echo "      Add 'osm_id','short_name','vendor' to tables 'vnfs', 'scenarios'"
+    for table in vnfs scenarios; do
+        sql "ALTER TABLE $table ADD COLUMN osm_id VARCHAR(255) NULL AFTER uuid, "\
+             "ADD UNIQUE INDEX osm_id_tenant_id (osm_id, tenant_id), "\
+             "ADD COLUMN short_name VARCHAR(255) NULL AFTER name, "\
+             "ADD COLUMN vendor VARCHAR(255) NULL AFTER description;"
+    done
+    sql "ALTER TABLE vnfs ADD COLUMN mgmt_access VARCHAR(2000) NULL AFTER vendor;"
+    sql "ALTER TABLE vms ADD COLUMN osm_id VARCHAR(255) NULL AFTER uuid;"
+    sql "ALTER TABLE sce_vnfs ADD COLUMN member_vnf_index SMALLINT(6) NULL DEFAULT NULL AFTER uuid;"
+    echo "      Add 'security_group' to table 'ip_profiles'"
+    sql "ALTER TABLE ip_profiles ADD COLUMN security_group VARCHAR(255) NULL DEFAULT NULL AFTER dhcp_count;"
+
+    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
+         "VALUES (25, '0.25', '0.5.22', 'Added osm_id to vnfs,scenarios', '2017-09-01');"
+}
+function downgrade_from_25(){
+    # Revert schema v25 -> v24: drop every column/index added by
+    # upgrade_to_25, mirroring it table by table.
+    # echo "    downgrade database from version 0.25 to version 0.24"
+    echo "      Remove 'osm_id','short_name','vendor' from tables 'vnfs', 'scenarios'"
+    for table in vnfs scenarios; do
+        sql "ALTER TABLE $table DROP INDEX  osm_id_tenant_id, DROP COLUMN osm_id, "\
+             "DROP COLUMN short_name, DROP COLUMN vendor;"
+    done
+    sql "ALTER TABLE vnfs DROP COLUMN mgmt_access;"
+    sql "ALTER TABLE vms DROP COLUMN osm_id;"
+    sql "ALTER TABLE sce_vnfs DROP COLUMN member_vnf_index;"
+    echo "      Remove 'security_group' from table 'ip_profiles'"
+    sql "ALTER TABLE ip_profiles DROP COLUMN security_group;"
+
+    sql "DELETE FROM schema_version WHERE version_int='25';"
+}
+
+function upgrade_to_26(){
+    echo "      Add name to table datacenter_tenants"
+    sql "ALTER TABLE datacenter_tenants ADD COLUMN name VARCHAR(255) NULL AFTER uuid;"
+    sql "UPDATE datacenter_tenants as dt join datacenters as d on dt.datacenter_id = d.uuid set dt.name=d.name;"
+    echo "      Add 'SCHEDULED' to 'status' at tables 'instance_nets', 'instance_vms'"
+    sql "ALTER TABLE instance_vms CHANGE COLUMN status status ENUM('ACTIVE:NoMgmtIP','ACTIVE','INACTIVE','BUILD',"\
+         "'ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') "\
+         "NOT NULL DEFAULT 'BUILD';"
+    sql "ALTER TABLE instance_nets CHANGE COLUMN status status ENUM('ACTIVE','INACTIVE','DOWN','BUILD','ERROR',"\
+         "'VIM_ERROR','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD';"
+    echo "      Enlarge pci at instance_interfaces to allow extended pci for SDN por mapping"
+    sql "ALTER TABLE instance_interfaces CHANGE COLUMN pci pci VARCHAR(50) NULL DEFAULT NULL COMMENT 'PCI of the "\
+        "physical port in the host' AFTER compute_node;"
+
+    for t in flavor image; do
+        echo "      Change 'datacenters_${t}s' to point to datacenter_tenant, add status, vim_info"
+        sql "ALTER TABLE datacenters_${t}s ADD COLUMN datacenter_vim_id VARCHAR(36) NULL DEFAULT NULL AFTER "\
+            "datacenter_id, ADD COLUMN status ENUM('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','DELETED',"\
+            "'SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD' AFTER vim_id, ADD COLUMN vim_info "\
+            "TEXT NULL AFTER status;"
+        sql "UPDATE datacenters_${t}s as df left join datacenter_tenants as dt on dt.datacenter_id=df.datacenter_id "\
+            "set df.datacenter_vim_id=dt.uuid;"
+        sql "DELETE FROM datacenters_${t}s WHERE datacenter_vim_id is NULL;"
+        sql "ALTER TABLE datacenters_${t}s CHANGE COLUMN datacenter_vim_id datacenter_vim_id VARCHAR(36) NOT NULL;"
+        sql "ALTER TABLE datacenters_${t}s ADD CONSTRAINT FK_datacenters_${t}s_datacenter_tenants FOREIGN KEY "\
+            "(datacenter_vim_id) REFERENCES datacenter_tenants (uuid) ON UPDATE CASCADE ON DELETE CASCADE;"
+        sql "ALTER TABLE datacenters_${t}s DROP FOREIGN KEY FK__datacenters_${t:0:1};"
+        sql "ALTER TABLE datacenters_${t}s DROP COLUMN datacenter_id;"
+       done
+
+    echo "      Decoupling 'instance_interfaces' from scenarios/vnfs to allow scale actions"
+    sql "ALTER TABLE instance_interfaces CHANGE COLUMN vim_interface_id vim_interface_id VARCHAR(128) NULL DEFAULT NULL;"
+    sql "ALTER TABLE instance_interfaces CHANGE COLUMN interface_id interface_id VARCHAR(36) NULL DEFAULT NULL;"
+       sql "ALTER TABLE instance_interfaces DROP FOREIGN KEY FK_instance_ids"
+       sql "ALTER TABLE instance_interfaces ADD CONSTRAINT FK_instance_ids FOREIGN KEY (interface_id) "\
+           "REFERENCES interfaces (uuid) ON UPDATE CASCADE ON DELETE SET NULL;"
+
+    echo "      Decoupling 'instance_vms' from scenarios/vnfs to allow scale actions"
+    sql "ALTER TABLE instance_vms CHANGE COLUMN vim_vm_id vim_vm_id VARCHAR(128) NULL DEFAULT NULL;"
+    sql "ALTER TABLE instance_vms CHANGE COLUMN vm_id vm_id VARCHAR(36) NULL DEFAULT NULL;"
+       sql "ALTER TABLE instance_vms DROP FOREIGN KEY FK_instance_vms_vms;"
+       sql "ALTER TABLE instance_vms ADD CONSTRAINT FK_instance_vms_vms FOREIGN KEY (vm_id) "\
+           "REFERENCES vms (uuid) ON UPDATE CASCADE ON DELETE SET NULL;"
+
+    echo "      Decoupling 'instance_nets' from scenarios/vnfs to allow scale actions"
+    sql "ALTER TABLE instance_nets CHANGE COLUMN vim_net_id vim_net_id VARCHAR(128) NULL DEFAULT NULL;"
+
+    echo "      Decoupling 'instance_scenarios' from scenarios"
+    sql "ALTER TABLE instance_scenarios CHANGE COLUMN scenario_id scenario_id VARCHAR(36) NULL DEFAULT NULL;"
+       sql "ALTER TABLE instance_scenarios DROP FOREIGN KEY FK_instance_scenarios_scenarios;"
+       sql "ALTER TABLE instance_scenarios ADD CONSTRAINT FK_instance_scenarios_scenarios FOREIGN KEY (scenario_id) "\
+           "REFERENCES scenarios (uuid) ON UPDATE CASCADE ON DELETE SET NULL;"
+
+    echo "      Create table instance_actions, vim_actions"
+    sql "CREATE TABLE IF NOT EXISTS instance_actions (
+           uuid VARCHAR(36) NOT NULL,
+           tenant_id VARCHAR(36) NULL DEFAULT NULL,
+           instance_id VARCHAR(36) NULL DEFAULT NULL,
+           description VARCHAR(64) NULL DEFAULT NULL COMMENT 'CREATE, DELETE, SCALE OUT/IN, ...',
+           number_tasks SMALLINT(6) NOT NULL DEFAULT '1',
+           number_done SMALLINT(6) NOT NULL DEFAULT '0',
+           number_failed SMALLINT(6) NOT NULL DEFAULT '0',
+           created_at DOUBLE NOT NULL,
+           modified_at DOUBLE NULL DEFAULT NULL,
+           PRIMARY KEY (uuid),
+        INDEX FK_actions_tenants (tenant_id),
+       CONSTRAINT FK_actions_tenant FOREIGN KEY (tenant_id) REFERENCES nfvo_tenants (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
+               COMMENT='Contains client actions over instances'
+        COLLATE='utf8_general_ci'
+        ENGINE=InnoDB;"  
+
+    sql "CREATE TABLE IF NOT EXISTS vim_actions (
+           instance_action_id VARCHAR(36) NOT NULL,
+           task_index INT(6) NOT NULL,
+           datacenter_vim_id VARCHAR(36) NOT NULL,
+           vim_id VARCHAR(64) NULL DEFAULT NULL,
+           action VARCHAR(36) NOT NULL COMMENT 'CREATE,DELETE,START,STOP...',
+           item ENUM('datacenters_flavors','datacenter_images','instance_nets','instance_vms','instance_interfaces') NOT NULL COMMENT 'table where the item is stored',
+           item_id VARCHAR(36) NULL DEFAULT NULL COMMENT 'uuid of the entry in the table',
+           status ENUM('SCHEDULED', 'BUILD', 'DONE', 'FAILED', 'SUPERSEDED') NOT NULL DEFAULT 'SCHEDULED',
+           extra TEXT NULL DEFAULT NULL COMMENT 'json with params:, depends_on: for the task',
+           error_msg VARCHAR(1024) NULL DEFAULT NULL,
+           created_at DOUBLE NOT NULL,
+           modified_at DOUBLE NULL DEFAULT NULL,
+           PRIMARY KEY (task_index, instance_action_id),
+        INDEX FK_actions_instance_actions (instance_action_id),
+       CONSTRAINT FK_actions_instance_actions FOREIGN KEY (instance_action_id) REFERENCES instance_actions (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
+        INDEX FK_actions_vims (datacenter_vim_id),
+       CONSTRAINT FK_actions_vims FOREIGN KEY (datacenter_vim_id) REFERENCES datacenter_tenants (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
+        COMMENT='Table with the individual VIM actions.'
+        COLLATE='utf8_general_ci'
+        ENGINE=InnoDB;"  
+
+    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
+         "VALUES (26, '0.26', '0.5.23', 'Several changes', '2017-09-09');"
+}
+function downgrade_from_26(){
+    echo "      Remove name from table datacenter_tenants"
+    sql "ALTER TABLE datacenter_tenants DROP COLUMN name;"
+    echo "      Remove 'SCHEDULED' from the 'status' at tables 'instance_nets', 'instance_vms'"
+    sql "ALTER TABLE instance_vms CHANGE COLUMN status status ENUM('ACTIVE:NoMgmtIP','ACTIVE','INACTIVE','BUILD',"\
+         "'ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED') NOT NULL DEFAULT 'BUILD';"
+    sql "ALTER TABLE instance_nets CHANGE COLUMN status status ENUM('ACTIVE','DOWN','BUILD','ERROR','VIM_ERROR',"\
+         "'INACTIVE','DELETED') NOT NULL DEFAULT 'BUILD';"
+    echo "      Shorten back pci at instance_interfaces to allow extended pci for SDN por mapping"
+    sql "ALTER TABLE instance_interfaces CHANGE COLUMN pci pci VARCHAR(12) NULL DEFAULT NULL COMMENT 'PCI of the "\
+        "physical port in the host' AFTER compute_node;"
+
+    for t in flavor image; do
+        echo "      Restore back 'datacenters_${t}s'"
+        sql "ALTER TABLE datacenters_${t}s ADD COLUMN datacenter_id VARCHAR(36) NULL DEFAULT NULL AFTER "\
+            "${t}_id, DROP COLUMN status, DROP COLUMN vim_info ;"
+        sql "UPDATE datacenters_${t}s as df left join datacenter_tenants as dt on dt.uuid=df.datacenter_vim_id set "\
+            "df.datacenter_id=dt.datacenter_id;"
+        sql "ALTER TABLE datacenters_${t}s CHANGE COLUMN datacenter_id datacenter_id VARCHAR(36) NOT NULL;"
+        sql "ALTER TABLE datacenters_${t}s ADD CONSTRAINT FK__datacenters_${t:0:1} FOREIGN KEY "\
+            "(datacenter_id) REFERENCES datacenters (uuid), DROP FOREIGN KEY FK_datacenters_${t}s_datacenter_tenants, "\
+            "DROP COLUMN datacenter_vim_id;"
+    done
+
+    echo "      Restore back 'instance_interfaces' coupling to scenarios/vnfs"
+    sql "ALTER TABLE instance_interfaces CHANGE COLUMN vim_interface_id vim_interface_id VARCHAR(36) NULL DEFAULT NULL;"
+       sql "ALTER TABLE instance_interfaces DROP FOREIGN KEY FK_instance_ids"
+    sql "ALTER TABLE instance_interfaces CHANGE COLUMN interface_id interface_id VARCHAR(36) NOT NULL;"
+       sql "ALTER TABLE instance_interfaces ADD CONSTRAINT FK_instance_ids FOREIGN KEY (interface_id) "\
+           "REFERENCES interfaces (uuid);"
+
+    echo "      Restore back 'instance_vms' coupling to scenarios/vnfs"
+    echo "      Decoupling 'instance vms' from scenarios/vnfs to allow scale actions"
+    sql "UPDATE instance_vms SET vim_vm_id='' WHERE vim_vm_id is NULL;"
+    sql "ALTER TABLE instance_vms CHANGE COLUMN vim_vm_id vim_vm_id VARCHAR(36) NOT NULL;"
+       sql "ALTER TABLE instance_vms DROP FOREIGN KEY FK_instance_vms_vms;"
+    sql "ALTER TABLE instance_vms CHANGE COLUMN vm_id vm_id VARCHAR(36) NOT NULL;"
+       sql "ALTER TABLE instance_vms ADD CONSTRAINT FK_instance_vms_vms FOREIGN KEY (vm_id) "\
+           "REFERENCES vms (uuid);"
+
+    echo "      Restore back 'instance_nets' coupling to scenarios/vnfs"
+    sql "UPDATE instance_nets SET vim_net_id='' WHERE vim_net_id is NULL;"
+    sql "ALTER TABLE instance_nets CHANGE COLUMN vim_net_id vim_net_id VARCHAR(36) NOT NULL;"
+
+    echo "      Restore back  'instance_scenarios' coupling to scenarios"
+       sql "ALTER TABLE instance_scenarios DROP FOREIGN KEY FK_instance_scenarios_scenarios;"
+    sql "ALTER TABLE instance_scenarios CHANGE COLUMN scenario_id scenario_id VARCHAR(36) NOT NULL;"
+       sql "ALTER TABLE instance_scenarios ADD CONSTRAINT FK_instance_scenarios_scenarios FOREIGN KEY (scenario_id) "\
+           "REFERENCES scenarios (uuid);"
+
+    echo "      Delete table instance_actions"
+    sql "DROP TABLE IF EXISTS vim_actions"
+    sql "DROP TABLE IF EXISTS instance_actions"
+    sql "DELETE FROM schema_version WHERE version_int='26';"
+}
+
+function upgrade_to_27(){
+    # Schema v26 -> v27: add per-tenant RO keypair columns to nfvo_tenants
+    # (encrypted private key and plain public key).
+    echo "      Added 'encrypted_RO_priv_key','RO_pub_key' to table 'nfvo_tenants'"
+    sql "ALTER TABLE nfvo_tenants ADD COLUMN encrypted_RO_priv_key VARCHAR(2000) NULL AFTER description;"
+    sql "ALTER TABLE nfvo_tenants ADD COLUMN RO_pub_key VARCHAR(510) NULL AFTER encrypted_RO_priv_key;"
+
+    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
+         "VALUES (27, '0.27', '0.5.25', 'Added encrypted_RO_priv_key,RO_pub_key to table nfvo_tenants', '2017-09-29');"
+}
+function downgrade_from_27(){
+    # Revert schema v27 -> v26: drop the RO keypair columns. Destructive:
+    # stored tenant keys are lost.
+    echo "      Remove 'encrypted_RO_priv_key','RO_pub_key' from table 'nfvo_tenants'"
+    sql "ALTER TABLE nfvo_tenants DROP COLUMN encrypted_RO_priv_key;"
+    sql "ALTER TABLE nfvo_tenants DROP COLUMN RO_pub_key;"
+    sql "DELETE FROM schema_version WHERE version_int='27';"
+}
+function upgrade_to_28(){
+    # Schema v27 -> v28: add the VNFFG (VNF Forwarding Graph / SFC) model:
+    # - template tables: sce_vnffgs, sce_rsps, sce_rsp_hops, sce_classifiers,
+    #   sce_classifier_matches
+    # - runtime tables: instance_sfis, instance_sfs, instance_classifications,
+    #   instance_sfps
+    # - extends the vim_actions.item enum with the new runtime tables.
+    # Tables are created in dependency order so FK targets exist first.
+    echo "      [Adding necessary tables for VNFFG]"
+    echo "      Adding sce_vnffgs"
+    # NOTE(review): FK_scenarios_vnffg is declared on tenant_id but references
+    # scenarios(uuid); scenario_id looks intended (it is what the index above
+    # it covers). Left as-is: altering a shipped migration would desync
+    # already-deployed databases — confirm before changing.
+    sql "CREATE TABLE IF NOT EXISTS sce_vnffgs (
+            uuid VARCHAR(36) NOT NULL,
+            tenant_id VARCHAR(36) NULL DEFAULT NULL,
+            name VARCHAR(255) NOT NULL,
+            description VARCHAR(255) NULL DEFAULT NULL,
+            vendor VARCHAR(255) NULL DEFAULT NULL,
+            scenario_id VARCHAR(36) NOT NULL,
+            created_at DOUBLE NOT NULL,
+            modified_at DOUBLE NULL DEFAULT NULL,
+        PRIMARY KEY (uuid),
+        INDEX FK_scenarios_sce_vnffg (scenario_id),
+        CONSTRAINT FK_scenarios_vnffg FOREIGN KEY (tenant_id) REFERENCES scenarios (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
+        COLLATE='utf8_general_ci'
+        ENGINE=InnoDB;"
+    echo "      Adding sce_rsps"
+    sql "CREATE TABLE IF NOT EXISTS sce_rsps (
+            uuid VARCHAR(36) NOT NULL,
+            tenant_id VARCHAR(36) NULL DEFAULT NULL,
+            name VARCHAR(255) NOT NULL,
+            sce_vnffg_id VARCHAR(36) NOT NULL,
+            created_at DOUBLE NOT NULL,
+            modified_at DOUBLE NULL DEFAULT NULL,
+        PRIMARY KEY (uuid),
+        INDEX FK_sce_vnffgs_rsp (sce_vnffg_id),
+        CONSTRAINT FK_sce_vnffgs_rsp FOREIGN KEY (sce_vnffg_id) REFERENCES sce_vnffgs (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
+        COLLATE='utf8_general_ci'
+        ENGINE=InnoDB;"
+    echo "      Adding sce_rsp_hops"
+    sql "CREATE TABLE IF NOT EXISTS sce_rsp_hops (
+            uuid VARCHAR(36) NOT NULL,
+            if_order INT DEFAULT 0 NOT NULL,
+            interface_id VARCHAR(36) NOT NULL,
+            sce_vnf_id VARCHAR(36) NOT NULL,
+            sce_rsp_id VARCHAR(36) NOT NULL,
+            created_at DOUBLE NOT NULL,
+            modified_at DOUBLE NULL DEFAULT NULL,
+        PRIMARY KEY (uuid),
+        INDEX FK_interfaces_rsp_hop (interface_id),
+        INDEX FK_sce_vnfs_rsp_hop (sce_vnf_id),
+        INDEX FK_sce_rsps_rsp_hop (sce_rsp_id),
+        CONSTRAINT FK_interfaces_rsp_hop FOREIGN KEY (interface_id) REFERENCES interfaces (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
+        CONSTRAINT FK_sce_vnfs_rsp_hop FOREIGN KEY (sce_vnf_id) REFERENCES sce_vnfs (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
+        CONSTRAINT FK_sce_rsps_rsp_hop FOREIGN KEY (sce_rsp_id) REFERENCES sce_rsps (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
+        COLLATE='utf8_general_ci'
+        ENGINE=InnoDB;"
+    echo "      Adding sce_classifiers"
+    sql "CREATE TABLE IF NOT EXISTS sce_classifiers (
+            uuid VARCHAR(36) NOT NULL,
+            tenant_id VARCHAR(36) NULL DEFAULT NULL,
+            name VARCHAR(255) NOT NULL,
+            sce_vnffg_id VARCHAR(36) NOT NULL,
+            sce_rsp_id VARCHAR(36) NOT NULL,
+            sce_vnf_id VARCHAR(36) NOT NULL,
+            interface_id VARCHAR(36) NOT NULL,
+            created_at DOUBLE NOT NULL,
+            modified_at DOUBLE NULL DEFAULT NULL,
+        PRIMARY KEY (uuid),
+        INDEX FK_sce_vnffgs_classifier (sce_vnffg_id),
+        INDEX FK_sce_rsps_classifier (sce_rsp_id),
+        INDEX FK_sce_vnfs_classifier (sce_vnf_id),
+        INDEX FK_interfaces_classifier (interface_id),
+        CONSTRAINT FK_sce_vnffgs_classifier FOREIGN KEY (sce_vnffg_id) REFERENCES sce_vnffgs (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
+        CONSTRAINT FK_sce_rsps_classifier FOREIGN KEY (sce_rsp_id) REFERENCES sce_rsps (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
+        CONSTRAINT FK_sce_vnfs_classifier FOREIGN KEY (sce_vnf_id) REFERENCES sce_vnfs (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
+        CONSTRAINT FK_interfaces_classifier FOREIGN KEY (interface_id) REFERENCES interfaces (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
+        COLLATE='utf8_general_ci'
+        ENGINE=InnoDB;"
+    echo "      Adding sce_classifier_matches"
+    sql "CREATE TABLE IF NOT EXISTS sce_classifier_matches (
+            uuid VARCHAR(36) NOT NULL,
+            ip_proto VARCHAR(2) NOT NULL,
+            source_ip VARCHAR(16) NOT NULL,
+            destination_ip VARCHAR(16) NOT NULL,
+            source_port VARCHAR(5) NOT NULL,
+            destination_port VARCHAR(5) NOT NULL,
+            sce_classifier_id VARCHAR(36) NOT NULL,
+            created_at DOUBLE NOT NULL,
+            modified_at DOUBLE NULL DEFAULT NULL,
+        PRIMARY KEY (uuid),
+        INDEX FK_classifiers_classifier_match (sce_classifier_id),
+        CONSTRAINT FK_sce_classifiers_classifier_match FOREIGN KEY (sce_classifier_id) REFERENCES sce_classifiers (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
+        COLLATE='utf8_general_ci'
+        ENGINE=InnoDB;"
+
+    echo "      [Adding necessary tables for VNFFG-SFC instance mapping]"
+    echo "      Adding instance_sfis"
+    sql "CREATE TABLE IF NOT EXISTS instance_sfis (
+          uuid varchar(36) NOT NULL,
+          instance_scenario_id varchar(36) NOT NULL,
+          vim_sfi_id varchar(36) DEFAULT NULL,
+          sce_rsp_hop_id varchar(36) DEFAULT NULL,
+          datacenter_id varchar(36) DEFAULT NULL,
+          datacenter_tenant_id varchar(36) DEFAULT NULL,
+          status enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+          error_msg varchar(1024) DEFAULT NULL,
+          vim_info text,
+          created_at double NOT NULL,
+          modified_at double DEFAULT NULL,
+          PRIMARY KEY (uuid),
+      KEY FK_instance_sfis_instance_scenarios (instance_scenario_id),
+      KEY FK_instance_sfis_sce_rsp_hops (sce_rsp_hop_id),
+      KEY FK_instance_sfis_datacenters (datacenter_id),
+      KEY FK_instance_sfis_datacenter_tenants (datacenter_tenant_id),
+      CONSTRAINT FK_instance_sfis_datacenter_tenants FOREIGN KEY (datacenter_tenant_id) REFERENCES datacenter_tenants (uuid),
+      CONSTRAINT FK_instance_sfis_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid),
+      CONSTRAINT FK_instance_sfis_instance_scenarios FOREIGN KEY (instance_scenario_id) REFERENCES instance_scenarios (uuid) ON DELETE CASCADE ON UPDATE CASCADE,
+      CONSTRAINT FK_instance_sfis_sce_rsp_hops FOREIGN KEY (sce_rsp_hop_id) REFERENCES sce_rsp_hops (uuid) ON DELETE SET NULL ON UPDATE CASCADE)
+      COLLATE='utf8_general_ci'
+      ENGINE=InnoDB;"
+    echo "      Adding instance_sfs"
+    sql "CREATE TABLE IF NOT EXISTS instance_sfs (
+          uuid varchar(36) NOT NULL,
+          instance_scenario_id varchar(36) NOT NULL,
+          vim_sf_id varchar(36) DEFAULT NULL,
+          sce_rsp_hop_id varchar(36) DEFAULT NULL,
+          datacenter_id varchar(36) DEFAULT NULL,
+          datacenter_tenant_id varchar(36) DEFAULT NULL,
+          status enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+          error_msg varchar(1024) DEFAULT NULL,
+          vim_info text,
+          created_at double NOT NULL,
+          modified_at double DEFAULT NULL,
+      PRIMARY KEY (uuid),
+      KEY FK_instance_sfs_instance_scenarios (instance_scenario_id),
+      KEY FK_instance_sfs_sce_rsp_hops (sce_rsp_hop_id),
+      KEY FK_instance_sfs_datacenters (datacenter_id),
+      KEY FK_instance_sfs_datacenter_tenants (datacenter_tenant_id),
+      CONSTRAINT FK_instance_sfs_datacenter_tenants FOREIGN KEY (datacenter_tenant_id) REFERENCES datacenter_tenants (uuid),
+      CONSTRAINT FK_instance_sfs_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid),
+      CONSTRAINT FK_instance_sfs_instance_scenarios FOREIGN KEY (instance_scenario_id) REFERENCES instance_scenarios (uuid) ON DELETE CASCADE ON UPDATE CASCADE,
+      CONSTRAINT FK_instance_sfs_sce_rsp_hops FOREIGN KEY (sce_rsp_hop_id) REFERENCES sce_rsp_hops (uuid) ON DELETE SET NULL ON UPDATE CASCADE)
+      COLLATE='utf8_general_ci'
+      ENGINE=InnoDB;"
+    echo "      Adding instance_classifications"
+    sql "CREATE TABLE IF NOT EXISTS instance_classifications (
+          uuid varchar(36) NOT NULL,
+          instance_scenario_id varchar(36) NOT NULL,
+          vim_classification_id varchar(36) DEFAULT NULL,
+          sce_classifier_match_id varchar(36) DEFAULT NULL,
+          datacenter_id varchar(36) DEFAULT NULL,
+          datacenter_tenant_id varchar(36) DEFAULT NULL,
+          status enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+          error_msg varchar(1024) DEFAULT NULL,
+          vim_info text,
+          created_at double NOT NULL,
+          modified_at double DEFAULT NULL,
+      PRIMARY KEY (uuid),
+      KEY FK_instance_classifications_instance_scenarios (instance_scenario_id),
+      KEY FK_instance_classifications_sce_classifier_matches (sce_classifier_match_id),
+      KEY FK_instance_classifications_datacenters (datacenter_id),
+      KEY FK_instance_classifications_datacenter_tenants (datacenter_tenant_id),
+      CONSTRAINT FK_instance_classifications_datacenter_tenants FOREIGN KEY (datacenter_tenant_id) REFERENCES datacenter_tenants (uuid),
+      CONSTRAINT FK_instance_classifications_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid),
+      CONSTRAINT FK_instance_classifications_instance_scenarios FOREIGN KEY (instance_scenario_id) REFERENCES instance_scenarios (uuid) ON DELETE CASCADE ON UPDATE CASCADE,
+      CONSTRAINT FK_instance_classifications_sce_classifier_matches FOREIGN KEY (sce_classifier_match_id) REFERENCES sce_classifier_matches (uuid) ON DELETE SET NULL ON UPDATE CASCADE)
+      COLLATE='utf8_general_ci'
+      ENGINE=InnoDB;"
+    echo "      Adding instance_sfps"
+    sql "CREATE TABLE IF NOT EXISTS instance_sfps (
+          uuid varchar(36) NOT NULL,
+          instance_scenario_id varchar(36) NOT NULL,
+          vim_sfp_id varchar(36) DEFAULT NULL,
+          sce_rsp_id varchar(36) DEFAULT NULL,
+          datacenter_id varchar(36) DEFAULT NULL,
+          datacenter_tenant_id varchar(36) DEFAULT NULL,
+          status enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+          error_msg varchar(1024) DEFAULT NULL,
+          vim_info text,
+          created_at double NOT NULL,
+          modified_at double DEFAULT NULL,
+      PRIMARY KEY (uuid),
+      KEY FK_instance_sfps_instance_scenarios (instance_scenario_id),
+      KEY FK_instance_sfps_sce_rsps (sce_rsp_id),
+      KEY FK_instance_sfps_datacenters (datacenter_id),
+      KEY FK_instance_sfps_datacenter_tenants (datacenter_tenant_id),
+      CONSTRAINT FK_instance_sfps_datacenter_tenants FOREIGN KEY (datacenter_tenant_id) REFERENCES datacenter_tenants (uuid),
+      CONSTRAINT FK_instance_sfps_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid),
+      CONSTRAINT FK_instance_sfps_instance_scenarios FOREIGN KEY (instance_scenario_id) REFERENCES instance_scenarios (uuid) ON DELETE CASCADE ON UPDATE CASCADE,
+      CONSTRAINT FK_instance_sfps_sce_rsps FOREIGN KEY (sce_rsp_id) REFERENCES sce_rsps (uuid) ON DELETE SET NULL ON UPDATE CASCADE)
+      COLLATE='utf8_general_ci'
+      ENGINE=InnoDB;"
+
+
+    echo "      [Altering vim_actions table]"
+    # Extend the task-engine 'item' enum with the four new runtime tables.
+    sql "ALTER TABLE vim_actions MODIFY COLUMN item ENUM('datacenters_flavors','datacenter_images','instance_nets','instance_vms','instance_interfaces','instance_sfis','instance_sfs','instance_classifications','instance_sfps') NOT NULL COMMENT 'table where the item is stored'"
+
+    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
+         "VALUES (28, '0.28', '0.5.28', 'Adding VNFFG-related tables', '2017-11-20');"
+}
# Revert schema version 28: drop all VNFFG/SFC related tables created by
# upgrade_to_28 and restore the original 'item' enum of vim_actions.
# Tables are dropped in reverse dependency order so foreign keys never
# block a drop.
function downgrade_from_28(){
    echo "      [Undo adding the VNFFG tables]"
    echo "      Dropping instance_sfps"
    sql "DROP TABLE IF EXISTS instance_sfps;"
    # Message fixed ("sce_classifications" -> "instance_classifications")
    # to match the table actually dropped below.
    echo "      Dropping instance_classifications"
    sql "DROP TABLE IF EXISTS instance_classifications;"
    echo "      Dropping instance_sfs"
    sql "DROP TABLE IF EXISTS instance_sfs;"
    echo "      Dropping instance_sfis"
    sql "DROP TABLE IF EXISTS instance_sfis;"
    echo "      Dropping sce_classifier_matches"
    echo "      [Undo adding the VNFFG-SFC instance mapping tables]"
    sql "DROP TABLE IF EXISTS sce_classifier_matches;"
    echo "      Dropping sce_classifiers"
    sql "DROP TABLE IF EXISTS sce_classifiers;"
    echo "      Dropping sce_rsp_hops"
    sql "DROP TABLE IF EXISTS sce_rsp_hops;"
    echo "      Dropping sce_rsps"
    sql "DROP TABLE IF EXISTS sce_rsps;"
    echo "      Dropping sce_vnffgs"
    sql "DROP TABLE IF EXISTS sce_vnffgs;"
    echo "      [Altering vim_actions table]"
    # Restore the 'item' enum to its pre-SFC set of values
    sql "ALTER TABLE vim_actions MODIFY COLUMN item ENUM('datacenters_flavors','datacenter_images','instance_nets','instance_vms','instance_interfaces') NOT NULL COMMENT 'table where the item is stored'"
    sql "DELETE FROM schema_version WHERE version_int='28';"
}
# Schema v29: 'member_vnf_index' becomes a string (alignment with the OSM
# information model) and 'nets'/'sce_nets' gain an 'osm_id' column.
function upgrade_to_29(){
    echo "      Change 'member_vnf_index' from int to str at 'sce_vnfs'"
    sql "ALTER TABLE sce_vnfs CHANGE COLUMN member_vnf_index member_vnf_index VARCHAR(255) NULL DEFAULT NULL AFTER uuid;"
    # Message fixed: "'nets's" -> "'nets'"
    echo "      Add osm_id to 'nets' and 'sce_nets'"
    sql "ALTER TABLE nets ADD COLUMN osm_id VARCHAR(255) NULL AFTER uuid;"
    sql "ALTER TABLE sce_nets ADD COLUMN osm_id VARCHAR(255) NULL AFTER uuid;"
    # Record the new schema version
    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
         "VALUES (29, '0.29', '0.5.59', 'Change member_vnf_index to str accordingly to the model', '2018-04-11');"
}
# Revert schema v29: 'member_vnf_index' back to SMALLINT, drop 'osm_id'
# from 'nets' and 'sce_nets'.
function downgrade_from_29(){
    echo "      Change back 'member_vnf_index' from str to int at 'sce_vnfs'"
    sql "ALTER TABLE sce_vnfs CHANGE COLUMN member_vnf_index member_vnf_index SMALLINT NULL DEFAULT NULL AFTER uuid;"
    # Message fixed: "'nets's" -> "'nets'"
    echo "      Remove osm_id from 'nets' and 'sce_nets'"
    sql "ALTER TABLE nets DROP COLUMN osm_id;"
    sql "ALTER TABLE sce_nets DROP COLUMN osm_id;"
    sql "DELETE FROM schema_version WHERE version_int='29';"
}
# Schema v30: 'vms' gains an 'image_list' TEXT column holding alternative
# images for a VM.
function upgrade_to_30(){
    echo "      Add 'image_list' at 'vms' to allocate alternative images"
    sql "ALTER TABLE vms ADD COLUMN image_list TEXT NULL COMMENT 'Alternative images' AFTER image_id;"
    # Record the new schema version
    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
         "VALUES (30, '0.30', '0.5.60', 'Add image_list to vms', '2018-04-24');"
}
# Revert schema v30: drop 'image_list' from 'vms'.
function downgrade_from_30(){
    echo "      Remove back 'image_list' from 'vms' to allocate alternative images"
    sql "ALTER TABLE vms DROP COLUMN image_list;"
    sql "DELETE FROM schema_version WHERE version_int='30';"
}
# Schema v31: 'sce_nets' gains an optional 'vim_network_name' column.
function upgrade_to_31(){
    echo "      Add 'vim_network_name' at 'sce_nets'"
    sql "ALTER TABLE sce_nets ADD COLUMN vim_network_name VARCHAR(255) NULL DEFAULT NULL AFTER description;"
    # Register the migration in schema_version
    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (31, '0.31', '0.5.61', 'Add vim_network_name to sce_nets', '2018-05-03');"
}
# Revert schema v31: drop 'vim_network_name' from 'sce_nets'.
function downgrade_from_31(){
    echo "      Remove back 'vim_network_name' from 'sce_nets'"
    sql "ALTER TABLE sce_nets DROP COLUMN vim_network_name;"
    sql "DELETE FROM schema_version WHERE version_int='31';"
}
# Schema v32: 'instance_vms' gains a 'vim_name' column (VM name at the VIM).
function upgrade_to_32(){
    echo "      Add 'vim_name' to 'instance_vms'"
    sql "ALTER TABLE instance_vms ADD COLUMN vim_name VARCHAR(255) NULL DEFAULT NULL AFTER vim_vm_id;"
    # Record the new schema version
    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
         "VALUES (32, '0.32', '0.5.70', 'Add vim_name to instance vms', '2018-06-28');"
}
# Revert schema v32: drop 'vim_name' from 'instance_vms'.
function downgrade_from_32(){
    echo "      Remove back 'vim_name' from 'instance_vms'"
    sql "ALTER TABLE instance_vms DROP COLUMN vim_name;"
    sql "DELETE FROM schema_version WHERE version_int='32';"
}
+
# Schema v33: 'vms' gains 'pdu_type' (PDU support) and 'instance_nets'
# gains 'vim_name'.
function upgrade_to_33(){
    echo "      Add PDU information to 'vms'"
    sql "ALTER TABLE vms ADD COLUMN pdu_type VARCHAR(255) NULL DEFAULT NULL AFTER osm_id;"
    sql "ALTER TABLE instance_nets ADD COLUMN vim_name VARCHAR(255) NULL DEFAULT NULL AFTER vim_net_id;"
    # Record the new schema version
    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
         "VALUES (33, '0.33', '0.5.82', 'Add pdu information to vms', '2018-11-13');"
}
# Revert schema v33: drop 'pdu_type' from 'vms' and 'vim_name' from
# 'instance_nets'.
function downgrade_from_33(){
    echo "      Remove back PDU information from 'vms'"
    sql "ALTER TABLE vms DROP COLUMN pdu_type;"
    sql "ALTER TABLE instance_nets DROP COLUMN vim_name;"
    sql "DELETE FROM schema_version WHERE version_int='33';"
}
# Template/experimental migration. NOTE: migrate() only dispatches numeric
# upgrade_to_<N> functions, so this "_X" variant is never invoked by the
# migration loop; it does not touch schema_version either.
function upgrade_to_X(){
    echo "      change 'datacenter_nets'"
    sql "ALTER TABLE datacenter_nets ADD COLUMN vim_tenant_id VARCHAR(36) NOT NULL AFTER datacenter_id, DROP INDEX name_datacenter_id, ADD UNIQUE INDEX name_datacenter_id (name, datacenter_id, vim_tenant_id);"
}
# Template/experimental migration counterpart of upgrade_to_X; never invoked
# by the numeric dispatch in migrate().
function downgrade_from_X(){
    echo "      Change back 'datacenter_nets'"
    sql "ALTER TABLE datacenter_nets DROP COLUMN vim_tenant_id, DROP INDEX name_datacenter_id, ADD UNIQUE INDEX name_datacenter_id (name, datacenter_id);"
}
# Schema v34: apply the newest up/34*.sql migration script (WIM tables).
# The script path is resolved at run time under ${DBUTILS}/migrations/up.
function upgrade_to_34() {
    echo "      Create databases required for WIM features"
    script="$(find "${DBUTILS}/migrations/up" -iname "34*.sql" | tail -1)"
    sql "source ${script}"
}
# Revert schema v34: apply the newest down/34*.sql migration script,
# which drops the WIM tables.
function downgrade_from_34() {
    echo "      Drop databases required for WIM features"
    script="$(find "${DBUTILS}/migrations/down" -iname "34*.sql" | tail -1)"
    sql "source ${script}"
}
# Schema v35: apply the newest up/35*.sql migration script, which splits
# sce_rsp_hops into separate ingress/egress interfaces for SFC.
function upgrade_to_35(){
    # Message fixed: migration 35 adds SFC ingress/egress ports (see
    # migrations/up/35_add_sfc_ingress_and_egress.sql); the old text was a
    # copy-paste of the v34 WIM message.
    echo "      Add ingress and egress ports for SFC"
    script="$(find "${DBUTILS}/migrations/up" -iname "35*.sql" | tail -1)"
    sql "source ${script}"
}
# Revert schema v35: apply the newest down/35*.sql migration script, which
# restores a single interface per sce_rsp_hop.
function downgrade_from_35(){
    # Message fixed: migration 35 is about SFC ingress/egress ports (see
    # migrations/down/35_remove_sfc_ingress_and_egress.sql), not WIM.
    echo "      Remove ingress and egress ports for SFC"
    script="$(find "${DBUTILS}/migrations/down" -iname "35*.sql" | tail -1)"
    sql "source ${script}"
}
# Schema v36: vms.image_id becomes nullable (PDUs have no image) and the
# 'config' columns of wims/wim_accounts are enlarged from varchar to TEXT.
function upgrade_to_36(){
    echo "      Allow null for image_id at 'vms'"
    # Drop the default first so the CHANGE COLUMN below starts clean
    sql "ALTER TABLE vms ALTER image_id DROP DEFAULT;"
    sql "ALTER TABLE vms CHANGE COLUMN image_id image_id VARCHAR(36) NULL COMMENT 'Link to image table' AFTER " \
        "flavor_id;"
    echo "      Enlarge config at 'wims' and 'wim_accounts'"
    sql "ALTER TABLE wims CHANGE COLUMN config config TEXT NULL DEFAULT NULL AFTER wim_url;"
    sql "ALTER TABLE wim_accounts CHANGE COLUMN config config TEXT NULL DEFAULT NULL AFTER password;"
    # Record the new schema version
    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
         "VALUES (36, '0.36', '0.6.03', 'Allow vm without image_id for PDUs', '2018-12-19');"
}
# Revert schema v36: make vms.image_id NOT NULL again. The enlarged TEXT
# config columns are deliberately left in place (see comment below).
function downgrade_from_36(){
    echo "      Force back not null for image_id at 'vms'"
    sql "ALTER TABLE vms ALTER image_id DROP DEFAULT;"
    sql "ALTER TABLE vms CHANGE COLUMN image_id image_id VARCHAR(36) NOT NULL COMMENT 'Link to image table' AFTER " \
        "flavor_id;"
    # For downgrade do not restore wims/wim_accounts config to varchar 4000
    sql "DELETE FROM schema_version WHERE version_int='36';"
}
# Schema v37: extend the vim_wim_actions 'item' enum with the SFC table
# names and 'instance_wim_nets'.
function upgrade_to_37(){
    echo "      Adding the enum tags for SFC"
    sql "ALTER TABLE vim_wim_actions " \
        "MODIFY COLUMN item " \
        "ENUM('datacenters_flavors','datacenter_images','instance_nets','instance_vms','instance_interfaces'," \
            "'instance_sfis','instance_sfs','instance_classifications','instance_sfps','instance_wim_nets') " \
        "NOT NULL COMMENT 'table where the item is stored';"
    # Record the new schema version
    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) " \
         "VALUES (37, '0.37', '0.6.09', 'Adding the enum tags for SFC', '2019-02-07');"
}
# Revert schema v37: intentionally a no-op apart from the version row;
# narrowing the enum again would restore a known-buggy state.
function downgrade_from_37(){
    echo "      Adding the enum tags for SFC isn't going to be reversed"
    # It doesn't make sense to reverse to a bug state.
    sql "DELETE FROM schema_version WHERE version_int='37';"
}
# Schema v38: add the task-engine bookkeeping columns ('worker', 'related')
# to vim_wim_actions, introduce the FINISHED status, and propagate a
# 'related' column to every instance_* table.
function upgrade_to_38(){
    echo "      Change vim_wim_actions, add worker, related"
    sql "ALTER TABLE vim_wim_actions ADD COLUMN worker VARCHAR(64) NULL AFTER task_index, ADD COLUMN related VARCHAR(36) NULL AFTER worker, CHANGE COLUMN status status ENUM('SCHEDULED','BUILD','DONE','FAILED','SUPERSEDED','FINISHED') NOT NULL DEFAULT 'SCHEDULED' AFTER item_id;"
    sql "UPDATE vim_wim_actions set related=item_id;"
    echo "      Change DONE to FINISHED when DELETE has been completed"
    # Pair each CREATE/FIND action with its completed DELETE on the same
    # item and mark both as FINISHED
    sql "UPDATE vim_wim_actions as v1 join vim_wim_actions as v2 on (v1.action='CREATE' or v1.action='FIND') and v2.action='DELETE' and (v2.status='SUPERSEDED' or v2.status='DONE') and v1.item_id=v2.item_id SET v1.status='FINISHED', v2.status='FINISHED';"
    echo "      Add osm_id to instance_nets"
    sql "ALTER TABLE instance_nets ADD COLUMN osm_id VARCHAR(255) NULL AFTER uuid;"
    echo "      Add related to instance_xxxx"
    for table in instance_classifications instance_nets instance_sfis instance_sfps instance_sfs instance_vms; do
        sql "ALTER TABLE $table ADD COLUMN related VARCHAR(36) NULL AFTER vim_info;"
        sql "UPDATE $table set related=uuid;"
    done
    # instance_wim_nets keeps its info under 'wim_info', so handle it apart
    sql "ALTER TABLE instance_wim_nets ADD COLUMN related VARCHAR(36) NULL AFTER wim_info;"
    sql "UPDATE instance_wim_nets set related=uuid;"
    # Record the new schema version
    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (38, '0.38', '0.6.11', 'Adding related to vim_wim_actions', '2019-03-07');"
}
# Revert schema v38: fold FINISHED back into DONE, drop the 'worker' and
# 'related' bookkeeping columns and remove osm_id from instance_nets.
function downgrade_from_38(){
    echo "      Change vim_wim_actions, delete worker, related"
    # FINISHED does not exist in the restored enum, so map it to DONE first
    sql "UPDATE vim_wim_actions SET status='DONE' WHERE status='FINISHED';"
    sql "ALTER TABLE vim_wim_actions DROP COLUMN worker, DROP COLUMN related, CHANGE COLUMN status status ENUM('SCHEDULED','BUILD','DONE','FAILED','SUPERSEDED') NOT NULL DEFAULT 'SCHEDULED' AFTER item_id;"
    echo "      Remove related from instance_xxxx"
    for table in instance_classifications instance_nets instance_wim_nets instance_sfis instance_sfps instance_sfs instance_vms; do
        sql "ALTER TABLE $table DROP COLUMN related;"
    done
    echo "      Remove osm_id from instance_nets"
    sql "ALTER TABLE instance_nets DROP COLUMN osm_id;"
    sql "DELETE FROM schema_version WHERE version_int='38';"
}
+
# Schema v39: widen every VIM identifier column to VARCHAR(300) (identifiers
# returned by some VIMs exceed the previous 36/64/128 limits); also drops
# the unique index on instance_vms.vim_vm_id.
function upgrade_to_39(){
    echo "      Enlarge vim_id to 300 at all places"
    sql "ALTER TABLE datacenters_flavors CHANGE COLUMN vim_id vim_id VARCHAR(300) NOT NULL AFTER datacenter_vim_id;"
    sql "ALTER TABLE datacenters_images CHANGE COLUMN vim_id vim_id VARCHAR(300) NOT NULL AFTER datacenter_vim_id;"
    sql "ALTER TABLE datacenter_nets CHANGE COLUMN vim_net_id vim_net_id VARCHAR(300) NOT NULL AFTER name;"
    sql "ALTER TABLE instance_classifications CHANGE COLUMN vim_classification_id vim_classification_id VARCHAR(300)" \
        " NULL DEFAULT NULL AFTER instance_scenario_id;"
    sql "ALTER TABLE instance_interfaces CHANGE COLUMN vim_interface_id vim_interface_id VARCHAR(300) NULL DEFAULT " \
        " NULL AFTER interface_id;"
    sql "ALTER TABLE instance_nets CHANGE COLUMN vim_net_id vim_net_id VARCHAR(300) NULL DEFAULT NULL" \
        " AFTER osm_id;"
    sql "ALTER TABLE instance_sfis CHANGE COLUMN vim_sfi_id vim_sfi_id VARCHAR(300) NULL DEFAULT NULL" \
        " AFTER instance_scenario_id;"
    sql "ALTER TABLE instance_sfps CHANGE COLUMN vim_sfp_id vim_sfp_id VARCHAR(300) NULL DEFAULT NULL" \
        " AFTER instance_scenario_id;"
    sql "ALTER TABLE instance_sfs CHANGE COLUMN vim_sf_id vim_sf_id VARCHAR(300) NULL DEFAULT NULL" \
        " AFTER instance_scenario_id;"
    # vim_vm_id also loses its UNIQUE index (restored by downgrade_from_39)
    sql "ALTER TABLE instance_vms CHANGE COLUMN vim_vm_id vim_vm_id VARCHAR(300) NULL DEFAULT NULL" \
        " AFTER instance_vnf_id, DROP INDEX vim_vm_id;"
    sql "ALTER TABLE instance_wim_nets CHANGE COLUMN wim_internal_id wim_internal_id VARCHAR(300) NULL DEFAULT NULL" \
        " COMMENT 'Internal ID used by the WIM to refer to the network' AFTER uuid;"
    sql "ALTER TABLE vim_wim_actions CHANGE COLUMN vim_id vim_id VARCHAR(300) NULL DEFAULT NULL" \
        " AFTER datacenter_vim_id;"

    # Record the new schema version
    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) " \
        "VALUES (39, '0.39', '0.6.20', 'Enlarge vim_id to 300 at all places', '2019-05-23');"
}
# Revert schema v39: restore every VIM identifier column to its previous
# length (36/64/128 depending on the table) and re-create the unique index
# on instance_vms.vim_vm_id.
function downgrade_from_39(){
    # Message fixed: "lenght" -> "length"
    echo "      Set vim_id to original length at all places"
    sql "ALTER TABLE datacenters_flavors CHANGE COLUMN vim_id vim_id VARCHAR(36) NOT NULL AFTER datacenter_vim_id;"
    sql "ALTER TABLE datacenters_images CHANGE COLUMN vim_id vim_id VARCHAR(36) NOT NULL AFTER datacenter_vim_id;"
    sql "ALTER TABLE datacenter_nets CHANGE COLUMN vim_net_id vim_net_id VARCHAR(36) NOT NULL AFTER name;"
    sql "ALTER TABLE instance_classifications CHANGE COLUMN vim_classification_id vim_classification_id VARCHAR(36)" \
        " NULL DEFAULT NULL AFTER instance_scenario_id;"
    sql "ALTER TABLE instance_interfaces CHANGE COLUMN vim_interface_id vim_interface_id VARCHAR(128) NULL DEFAULT " \
        " NULL AFTER interface_id;"
    sql "ALTER TABLE instance_nets CHANGE COLUMN vim_net_id vim_net_id VARCHAR(128) NULL DEFAULT NULL" \
        " AFTER osm_id;"
    sql "ALTER TABLE instance_sfis CHANGE COLUMN vim_sfi_id vim_sfi_id VARCHAR(36) NULL DEFAULT NULL" \
        " AFTER instance_scenario_id;"
    sql "ALTER TABLE instance_sfps CHANGE COLUMN vim_sfp_id vim_sfp_id VARCHAR(36) NULL DEFAULT NULL" \
        " AFTER instance_scenario_id;"
    sql "ALTER TABLE instance_sfs CHANGE COLUMN vim_sf_id vim_sf_id VARCHAR(36) NULL DEFAULT NULL" \
        " AFTER instance_scenario_id;"
    sql "ALTER TABLE instance_vms CHANGE COLUMN vim_vm_id vim_vm_id VARCHAR(36) NULL DEFAULT NULL" \
        " AFTER instance_vnf_id, ADD UNIQUE INDEX vim_vm_id (vim_vm_id);"
    sql "ALTER TABLE instance_wim_nets CHANGE COLUMN wim_internal_id wim_internal_id VARCHAR(128) NULL DEFAULT NULL" \
        " COMMENT 'Internal ID used by the WIM to refer to the network' AFTER uuid;"
    sql "ALTER TABLE vim_wim_actions CHANGE COLUMN vim_id vim_id VARCHAR(64) NULL DEFAULT NULL" \
        " AFTER datacenter_vim_id;"

    sql "DELETE FROM schema_version WHERE version_int='39';"
}
+#TODO ... put functions here
+
+
# Remove the sentinel row (version_int=0) that marks a migration in
# progress. Exits the whole script on a database write error.
function del_schema_version_process()
{
    if ! echo "DELETE FROM schema_version WHERE version_int='0';" | $DBCMD
    then
        echo "    ERROR writing on schema_version" >&2
        exit 1
    fi
}
+
# Insert a sentinel row (version_int=0) into schema_version recording that a
# migration from $DATABASE_VER_NUM to $DB_VERSION is in progress, embedding
# the backup file path ("backup: <file>") so an interrupted run can recover.
# Exits the script on database error.
function set_schema_version_process()
{
    echo "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES "\
        "(0, '0.0', '0.0.0', 'migration from $DATABASE_VER_NUM to $DB_VERSION backup: $BACKUP_FILE',"\
        "'$(date +%Y-%m-%d)');" | $DBCMD ||
        ! echo  "    Cannot set database at migration process writing into schema_version" >&2 || exit 1

}
+
# Restore the database from $BACKUP_FILE after a failed migration and abort
# the script (always exits 1). When the run started on an empty database
# ($DATABASE_PROCESS contains 'init') there is no backup to restore.
function rollback_db()
{
    if echo $DATABASE_PROCESS | grep -q init ; then   # Empty database. No backup needed
        echo "    Aborted! Rollback database not needed" && exit 1
    else   # migration a non empty database or Recovering a migration process
        # On a successful restore also clear the in-progress sentinel row
        # and delete the backup file before exiting
        cat $BACKUP_FILE | mysql $DEF_EXTRA_FILE_PARAM && echo "    Aborted! Rollback database OK" &&
            del_schema_version_process && rm -f "$BACKUP_FILE" && exit 1
        echo "    Aborted! Rollback database FAIL" && exit 1
    fi
}
+
# Send a SQL command (all arguments concatenated) to the database.
# On failure: report the failing command and roll back the database from
# the backup (rollback_db terminates the script). Always returns 0 on the
# success path.
function sql()    # send a sql command
{
    # ">&2" added so the error lands on stderr like every other error
    # message in this script; the echo must still succeed so that the "!"
    # negation chains into rollback_db.
    echo "$*" | $DBCMD || ! echo "    ERROR with command '$*'" >&2 || rollback_db
    return 0
}
+
# Step the schema one version at a time from DATABASE_VER_NUM to the
# requested DB_VERSION, dispatching dynamically to upgrade_to_<N> /
# downgrade_from_<N>. Exactly one of the two loops runs depending on the
# direction; each step updates DATABASE_VER_NUM.
function migrate()
{
    #UPGRADE DATABASE step by step
    while [ $DB_VERSION -gt $DATABASE_VER_NUM ]
    do
        echo "    upgrade database from version '$DATABASE_VER_NUM' to '$((DATABASE_VER_NUM+1))'"
        DATABASE_VER_NUM=$((DATABASE_VER_NUM+1))
        upgrade_to_${DATABASE_VER_NUM}
        #FILE_="${DIRNAME}/upgrade_to_${DATABASE_VER_NUM}.sh"
        #[ ! -x "$FILE_" ] && echo "Error, can not find script '$FILE_' to upgrade" >&2 && exit -1
        #$FILE_ || exit -1  # if fail return
    done

    #DOWNGRADE DATABASE step by step
    while [ $DB_VERSION -lt $DATABASE_VER_NUM ]
    do
        echo "    downgrade database from version '$DATABASE_VER_NUM' to '$((DATABASE_VER_NUM-1))'"
        #FILE_="${DIRNAME}/downgrade_from_${DATABASE_VER_NUM}.sh"
        #[ ! -x "$FILE_" ] && echo "Error, can not find script '$FILE_' to downgrade" >&2 && exit -1
        #$FILE_ || exit -1  # if fail return
        downgrade_from_${DATABASE_VER_NUM}
        DATABASE_VER_NUM=$((DATABASE_VER_NUM-1))
    done
}
+
+
+# check if current database is ok
# Read the current schema version from the database into DATABASE_VER_NUM.
# Returns 0 when a migration is needed, 1 when the database already is at
# DB_VERSION. Exits the script on read errors, implausible version numbers
# or a database newer than this script supports ($LAST_DB_VERSION).
function check_migration_needed()
{
    DATABASE_VER_NUM=`echo "select max(version_int) from schema_version;" | $DBCMD | tail -n+2` ||
    ! echo "    ERROR cannot read from schema_version" || exit 1

    # Sanity check: must be a number within [0, 100]
    if [[ -z "$DATABASE_VER_NUM" ]] || [[ "$DATABASE_VER_NUM" -lt 0 ]] || [[ "$DATABASE_VER_NUM" -gt 100 ]] ; then
        echo "    Error can not get database version ($DATABASE_VER_NUM?)" >&2
        exit 1
    fi

    [[ $DB_VERSION -eq $DATABASE_VER_NUM ]] && echo "    current database version '$DATABASE_VER_NUM' is ok" && return 1
    [[ "$DATABASE_VER_NUM" -gt "$LAST_DB_VERSION" ]] &&
        echo "Database has been upgraded with a newer version of this script. Use this version to downgrade" >&2 &&
        exit 1
    return 0
}
+
# ---- main driver ----------------------------------------------------------
# A row with version_int=0 in schema_version marks a migration in progress;
# its 'comments' field encodes the state: empty (normal database), 'init'
# (empty database) or "... backup: <file>" (interrupted migration).
DATABASE_PROCESS=`echo "select comments from schema_version where version_int=0;" | $DBCMD | tail -n+2` ||
    ! echo "    ERROR cannot read from schema_version" || exit 1
if [[ -z "$DATABASE_PROCESS" ]] ; then  # migration a non empty database
    check_migration_needed || exit 0
    # Create a backup database content
    [[ -n "$BACKUP_DIR" ]] && BACKUP_FILE=$(mktemp -q  "${BACKUP_DIR}/backupdb.XXXXXX.sql")
    [[ -z "$BACKUP_DIR" ]] && BACKUP_FILE=$(mktemp -q --tmpdir "backupdb.XXXXXX.sql")
    mysqldump $DEF_EXTRA_FILE_PARAM --add-drop-table --add-drop-database --routines --databases $DBNAME > $BACKUP_FILE ||
        ! echo "Cannot create Backup file '$BACKUP_FILE'" >&2 || exit 1
    echo "    Backup file '$BACKUP_FILE' created"
    # Set schema version
    set_schema_version_process
    migrate
    del_schema_version_process
    rm -f "$BACKUP_FILE"
elif echo $DATABASE_PROCESS | grep -q init ; then   # Empty database. No backup needed
    echo "    Migrating an empty database"
    if check_migration_needed ; then
        migrate
    fi
    del_schema_version_process

else  # Recover Migration process
    # Extract the backup file path recorded by set_schema_version_process
    BACKUP_FILE=${DATABASE_PROCESS##*backup: }
    [[ -f "$BACKUP_FILE" ]] || ! echo "Previous migration process fail and cannot recover backup file '$BACKUP_FILE'" >&2 ||
        exit 1
    # NOTE(review): the message below is missing a space before '$BACKUP_FILE'
    echo "    Previous migration was killed. Restoring database from rollback file'$BACKUP_FILE'"
    cat $BACKUP_FILE | mysql $DEF_EXTRA_FILE_PARAM || ! echo "    Cannot load backup file '$BACKUP_FILE'" >&2 || exit 1
    if check_migration_needed ; then
        set_schema_version_process
        migrate
    fi
    del_schema_version_process
    rm -f "$BACKUP_FILE"
fi
exit 0

#echo done
+
diff --git a/RO/osm_ro/database_utils/migrations/down/34_remove_wim_tables.sql b/RO/osm_ro/database_utils/migrations/down/34_remove_wim_tables.sql
new file mode 100644 (file)
index 0000000..4400e39
--- /dev/null
@@ -0,0 +1,31 @@
--
-- Tear down database structure required for integrating OSM with
-- Wide Area Network Infrastructure Managers
-- (reverts migration 34: drops the WIM tables and renames vim_wim_actions
-- back to vim_actions; dependent tables are dropped before wims)
--

DROP TABLE IF EXISTS wim_port_mappings;
DROP TABLE IF EXISTS wim_nfvo_tenants;
DROP TABLE IF EXISTS instance_wim_nets;

-- Remove the WIM-specific columns/indexes and restore the original
-- table name and 'item' enum of the actions table
ALTER TABLE `vim_wim_actions` DROP FOREIGN KEY `FK_actions_wims`;
ALTER TABLE `vim_wim_actions` DROP INDEX `FK_actions_wims`;
ALTER TABLE `vim_wim_actions` DROP INDEX `item_type_id`;
ALTER TABLE `vim_wim_actions` MODIFY `item` enum(
  'datacenters_flavors',
  'datacenter_images',
  'instance_nets',
  'instance_vms',
  'instance_interfaces',
  'instance_sfis',
  'instance_sfs',
  'instance_classifications',
  'instance_sfps') NOT NULL
  COMMENT 'table where the item is stored';
ALTER TABLE `vim_wim_actions` MODIFY `datacenter_vim_id` varchar(36) NOT NULL;
ALTER TABLE `vim_wim_actions` DROP `wim_internal_id`, DROP `wim_account_id`;
ALTER TABLE `vim_wim_actions` RENAME TO `vim_actions`;

DROP TABLE IF EXISTS wim_accounts;
DROP TABLE IF EXISTS wims;

-- Unregister migration 34 from the version table
DELETE FROM schema_version WHERE version_int='34';
diff --git a/RO/osm_ro/database_utils/migrations/down/35_remove_sfc_ingress_and_egress.sql b/RO/osm_ro/database_utils/migrations/down/35_remove_sfc_ingress_and_egress.sql
new file mode 100644 (file)
index 0000000..01f38f4
--- /dev/null
@@ -0,0 +1,16 @@
--
-- Removing ingress and egress ports for SFC purposes.
-- Inserting only one port for ingress and egress.
-- (reverts migration 35: sce_rsp_hops goes back to a single interface_id;
-- the egress interface information is discarded and the ingress interface
-- becomes the single 'interface_id' again)
--

ALTER TABLE sce_rsp_hops
  DROP FOREIGN KEY FK_interfaces_rsp_hop_ingress,
  CHANGE COLUMN ingress_interface_id interface_id VARCHAR(36) NOT NULL
    AFTER if_order,
  ADD CONSTRAINT FK_interfaces_rsp_hop
    FOREIGN KEY (interface_id)
    REFERENCES interfaces (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
  DROP FOREIGN KEY FK_interfaces_rsp_hop_egress,
  DROP COLUMN egress_interface_id;

-- Unregister migration 35 from the version table
DELETE FROM schema_version WHERE version_int='35';
diff --git a/RO/osm_ro/database_utils/migrations/up/34_add_wim_tables.sql b/RO/osm_ro/database_utils/migrations/up/34_add_wim_tables.sql
new file mode 100644 (file)
index 0000000..343f370
--- /dev/null
@@ -0,0 +1,169 @@
--
-- Setup database structure required for integrating OSM with
-- Wide Area Network Infrastructure Managers
--

-- WIMs registered in the NFVO (one row per WIM)
DROP TABLE IF EXISTS wims;
CREATE TABLE wims (
  `uuid` varchar(36) NOT NULL,
  `name` varchar(255) NOT NULL,
  `description` varchar(255) DEFAULT NULL,
  `type` varchar(36) NOT NULL DEFAULT 'odl',
  `wim_url` varchar(150) NOT NULL,
  `config` varchar(4000) DEFAULT NULL,
  `created_at` double NOT NULL,
  `modified_at` double DEFAULT NULL,
  PRIMARY KEY (`uuid`),
  UNIQUE KEY `name` (`name`)
)
ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT
COMMENT='WIMs managed by the NFVO.';
+
-- Per-user credentials/config for each WIM; cascade-deleted with the WIM
DROP TABLE IF EXISTS wim_accounts;
CREATE TABLE wim_accounts (
  `uuid` varchar(36) NOT NULL,
  `name` varchar(255) DEFAULT NULL,
  `wim_id` varchar(36) NOT NULL,
  `created` enum('true','false') NOT NULL DEFAULT 'false',
  `user` varchar(64) DEFAULT NULL,
  `password` varchar(64) DEFAULT NULL,
  `config` varchar(4000) DEFAULT NULL,
  `created_at` double NOT NULL,
  `modified_at` double DEFAULT NULL,
  PRIMARY KEY (`uuid`),
  UNIQUE KEY `wim_name` (`wim_id`,`name`),
  KEY `FK_wim_accounts_wims` (`wim_id`),
  CONSTRAINT `FK_wim_accounts_wims` FOREIGN KEY (`wim_id`)
    REFERENCES `wims` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
)
ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT
COMMENT='WIM accounts by the user';

-- Association between NFVO tenants and WIM accounts
DROP TABLE IF EXISTS `wim_nfvo_tenants`;
CREATE TABLE `wim_nfvo_tenants` (
  `id` integer NOT NULL AUTO_INCREMENT,
  `nfvo_tenant_id` varchar(36) NOT NULL,
  `wim_id` varchar(36) NOT NULL,
  `wim_account_id` varchar(36) NOT NULL,
  `created_at` double NOT NULL,
  `modified_at` double DEFAULT NULL,
  PRIMARY KEY (`id`),
  UNIQUE KEY `wim_nfvo_tenant` (`wim_id`,`nfvo_tenant_id`),
  KEY `FK_wims_nfvo_tenants` (`wim_id`),
  KEY `FK_wim_accounts_nfvo_tenants` (`wim_account_id`),
  KEY `FK_nfvo_tenants_wim_accounts` (`nfvo_tenant_id`),
  CONSTRAINT `FK_wims_nfvo_tenants` FOREIGN KEY (`wim_id`)
    REFERENCES `wims` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
  CONSTRAINT `FK_wim_accounts_nfvo_tenants` FOREIGN KEY (`wim_account_id`)
    REFERENCES `wim_accounts` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
  CONSTRAINT `FK_nfvo_tenants_wim_accounts` FOREIGN KEY (`nfvo_tenant_id`)
    REFERENCES `nfvo_tenants` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
)
ENGINE=InnoDB AUTO_INCREMENT=86 DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT
COMMENT='WIM accounts mapping to NFVO tenants';
+
-- Instantiated WIM networks (the WIM-side counterpart of instance_nets)
DROP TABLE IF EXISTS `instance_wim_nets`;
CREATE TABLE `instance_wim_nets` (
  `uuid` varchar(36) NOT NULL,
  `wim_internal_id` varchar(128) DEFAULT NULL
    COMMENT 'Internal ID used by the WIM to refer to the network',
  `instance_scenario_id` varchar(36) DEFAULT NULL,
  `sce_net_id` varchar(36) DEFAULT NULL,
  `wim_id` varchar(36) DEFAULT NULL,
  `wim_account_id` varchar(36) NOT NULL,
  `status` enum(
    'ACTIVE',
    'INACTIVE',
    'DOWN',
    'BUILD',
    'ERROR',
    'WIM_ERROR',
    'DELETED',
    'SCHEDULED_CREATION',
    'SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
  `error_msg` varchar(1024) DEFAULT NULL,
  `wim_info` text,
  `multipoint` enum('true','false') NOT NULL DEFAULT 'false',
  `created` enum('true','false') NOT NULL DEFAULT 'false'
      COMMENT 'Created or already exists at WIM',
  `created_at` double NOT NULL,
  `modified_at` double DEFAULT NULL,
  PRIMARY KEY (`uuid`),
  KEY `FK_instance_wim_nets_instance_scenarios` (`instance_scenario_id`),
  KEY `FK_instance_wim_nets_sce_nets` (`sce_net_id`),
  KEY `FK_instance_wim_nets_wims` (`wim_id`),
  KEY `FK_instance_wim_nets_wim_accounts` (`wim_account_id`),
  CONSTRAINT `FK_instance_wim_nets_wim_accounts`
    FOREIGN KEY (`wim_account_id`) REFERENCES `wim_accounts` (`uuid`),
  CONSTRAINT `FK_instance_wim_nets_wims`
    FOREIGN KEY (`wim_id`) REFERENCES `wims` (`uuid`),
  CONSTRAINT `FK_instance_wim_nets_instance_scenarios`
    FOREIGN KEY (`instance_scenario_id`) REFERENCES `instance_scenarios` (`uuid`)
    ON DELETE CASCADE ON UPDATE CASCADE,
  CONSTRAINT `FK_instance_wim_nets_sce_nets`
    FOREIGN KEY (`sce_net_id`) REFERENCES `sce_nets` (`uuid`)
    ON DELETE SET NULL ON UPDATE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT
  COMMENT='Instances of wim networks';
+
-- Rename vim_actions to vim_wim_actions and extend it for WIM usage:
-- new WIM columns, a now-nullable datacenter_vim_id, the extra enum value
-- 'instance_wim_nets', and the supporting indexes/foreign key
ALTER TABLE `vim_actions`
  RENAME TO `vim_wim_actions`;
ALTER TABLE `vim_wim_actions`
  ADD `wim_account_id` varchar(36) DEFAULT NULL AFTER `vim_id`,
  ADD `wim_internal_id` varchar(64) DEFAULT NULL AFTER `wim_account_id`,
  MODIFY `datacenter_vim_id` varchar(36) DEFAULT NULL,
  MODIFY `item` enum(
    'datacenters_flavors',
    'datacenter_images',
    'instance_nets',
    'instance_vms',
    'instance_interfaces',
    'instance_sfis',
    'instance_sfs',
    'instance_classifications',
    'instance_sfps',
    'instance_wim_nets') NOT NULL
  COMMENT 'table where the item is stored';
ALTER TABLE `vim_wim_actions`
  ADD INDEX `item_type_id` (`item`, `item_id`);
ALTER TABLE `vim_wim_actions`
  ADD INDEX `FK_actions_wims` (`wim_account_id`);
ALTER TABLE `vim_wim_actions`
  ADD CONSTRAINT `FK_actions_wims` FOREIGN KEY (`wim_account_id`)
  REFERENCES `wim_accounts` (`uuid`)
  ON UPDATE CASCADE ON DELETE CASCADE;
+
-- Physical port mapping between datacenter switches and WIM service
-- endpoints
DROP TABLE IF EXISTS `wim_port_mappings`;
CREATE TABLE `wim_port_mappings` (
  `id` integer NOT NULL AUTO_INCREMENT,
  `wim_id` varchar(36) NOT NULL,
  `datacenter_id` varchar(36) NOT NULL,
  `pop_switch_dpid` varchar(64) NOT NULL,
  `pop_switch_port` varchar(64) NOT NULL,
  `wan_service_endpoint_id` varchar(256) NOT NULL
      COMMENT 'In case the WIM plugin relies on the wan_service_mapping_info'
      COMMENT 'this field contains a unique identifier used to check the mapping_info consistency',
      /* In other words: wan_service_endpoint_id = f(wan_service_mapping_info)
       * where f is a injective function'
       */
  `wan_service_mapping_info` text,
  `created_at` double NOT NULL,
  `modified_at` double DEFAULT NULL,
  PRIMARY KEY (`id`),
  UNIQUE KEY `unique_datacenter_port_mapping`
    (`datacenter_id`, `pop_switch_dpid`, `pop_switch_port`),
  UNIQUE KEY `unique_wim_port_mapping`
    (`wim_id`, `wan_service_endpoint_id`),
  KEY `FK_wims_wim_physical_connections` (`wim_id`),
  KEY `FK_datacenters_wim_port_mappings` (`datacenter_id`),
  CONSTRAINT `FK_wims_wim_port_mappings` FOREIGN KEY (`wim_id`)
    REFERENCES `wims` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
  CONSTRAINT `FK_datacenters_wim_port_mappings` FOREIGN KEY (`datacenter_id`)
    REFERENCES `datacenters` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
)
ENGINE=InnoDB DEFAULT CHARSET=utf8
COMMENT='WIM port mappings managed by the WIM.';

-- Update Schema with DB version
INSERT INTO schema_version
VALUES (34, '0.34', '0.6.00', 'Added WIM tables', '2018-09-10');
diff --git a/RO/osm_ro/database_utils/migrations/up/35_add_sfc_ingress_and_egress.sql b/RO/osm_ro/database_utils/migrations/up/35_add_sfc_ingress_and_egress.sql
new file mode 100644 (file)
index 0000000..b528c6d
--- /dev/null
@@ -0,0 +1,29 @@
--
-- Adding different ingress and egress ports for SFC.
--

-- Replace the single 'interface_id' with 'ingress_interface_id' and add an
-- 'egress_interface_id' column (nullable at first so existing rows can be
-- backfilled below)
ALTER TABLE sce_rsp_hops
  DROP FOREIGN KEY FK_interfaces_rsp_hop,
  CHANGE COLUMN interface_id ingress_interface_id VARCHAR(36) NOT NULL
    AFTER if_order,
  ADD CONSTRAINT FK_interfaces_rsp_hop_ingress
    FOREIGN KEY (ingress_interface_id)
    REFERENCES interfaces (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
  ADD COLUMN egress_interface_id VARCHAR(36) NULL DEFAULT NULL
    AFTER ingress_interface_id;

-- Backfill: existing hops use the same interface for ingress and egress
UPDATE sce_rsp_hops
  SET egress_interface_id = ingress_interface_id;

ALTER TABLE sce_rsp_hops
  ALTER COLUMN egress_interface_id DROP DEFAULT;

-- Every row is populated now, so enforce NOT NULL and add the foreign key
ALTER TABLE sce_rsp_hops
  MODIFY COLUMN egress_interface_id VARCHAR(36) NOT NULL
    AFTER ingress_interface_id,
  ADD CONSTRAINT FK_interfaces_rsp_hop_egress
    FOREIGN KEY (egress_interface_id)
    REFERENCES interfaces (uuid) ON UPDATE CASCADE ON DELETE CASCADE;

-- Register migration 35 in the version table
INSERT INTO schema_version (version_int, version, openmano_ver, comments, date)
  VALUES (35, '0.35', '0.6.02', 'Adding ingress and egress ports for RSPs', '2018-12-11');
diff --git a/RO/osm_ro/db_base.py b/RO/osm_ro/db_base.py
new file mode 100644 (file)
index 0000000..9c13133
--- /dev/null
@@ -0,0 +1,815 @@
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+'''
+Base class for openmano database manipulation
+'''
+__author__="Alfonso Tierno"
+__date__ ="$4-Apr-2016 10:05:01$"
+
+import MySQLdb as mdb
+import uuid as myUuid
+from osm_ro import utils as af
+import json
+#import yaml
+import time
+import logging
+import datetime
+from contextlib import contextmanager
+from functools import wraps, partial
+from threading import Lock
+from jsonschema import validate as js_v, exceptions as js_e
+
+from .http_tools import errors as httperrors
+from .utils import Attempt, get_arg, inject_args
+
+
+RECOVERY_TIME = 3
+
+_ATTEMPT = Attempt()
+
+
+def with_transaction(fn=None, cursor=None):
+    """Decorator that can be used together with instances of the ``db_base``
+    class, to perform database actions wrapped in a commit/rollback fashion
+
+    This decorator basically executes the function inside the context object
+    given by the ``transaction`` method in ``db_base``
+
+    Arguments:
+        cursor: [Optional] cursor class name; currently only 'dict' is
+            recognized (selects ``MySQLdb.cursors.DictCursor``), any other
+            value falls back to the connection's default cursor.
+
+    Returns:
+        The decorated method, whose body runs inside ``self.transaction()``.
+    """
+    if fn is None:  # Allows calling the decorator directly or with parameters
+        return partial(with_transaction, cursor=cursor)
+
+    @wraps(fn)
+    def _wrapper(self, *args, **kwargs):
+        cursor_type = None
+        if cursor == 'dict':
+            # MySQLdB define the "cursors" module attribute lazily,
+            # so we have to defer references to mdb.cursors.DictCursor
+            cursor_type = mdb.cursors.DictCursor
+
+        # ``self`` must be a db_base instance (or expose .transaction)
+        with self.transaction(cursor_type):
+            return fn(self, *args, **kwargs)
+
+    return _wrapper
+
+
+def retry(fn=None, max_attempts=Attempt.MAX, **info):
+    """Decorator that can be used together with instances of the ``db_base``
+    class, to replay a method again after a unexpected error.
+
+    The function being decorated needs to either be a method of ``db_base``
+    subclasses or accept an ``db_base`` instance as the first parameter.
+
+    All the extra keyword arguments will be passed to the ``_format_error``
+    method
+    """
+    if fn is None:  # Allows calling the decorator directly or with parameters
+        return partial(retry, max_attempts=max_attempts, **info)
+
+    @wraps(fn)
+    def _wrapper(*args, **kwargs):
+        self = args[0]
+        info.setdefault('table', get_arg('table', fn, args, kwargs))
+        attempt = Attempt(max_attempts=max_attempts, info=info)
+        while attempt.countdown >= 0:
+            try:
+                return inject_args(fn, attempt=attempt)(*args, **kwargs)
+            except (mdb.Error, AttributeError) as ex:
+                self.logger.debug("Attempt #%d", attempt.number)
+                try:
+                    # The format error will throw exceptions, however it can
+                    # tolerate a certain amount of retries if it judges that
+                    # the error can be solved with retrying
+                    self._format_error(ex, attempt.countdown, **attempt.info)
+                    # Anyway, unexpected/unknown errors can still be retried
+                except db_base_Exception as db_ex:
+                    if (attempt.countdown < 0 or db_ex.http_code !=
+                            httperrors.Internal_Server_Error):
+                        raise
+
+            attempt.count += 1
+
+    return _wrapper
+
+
+def _check_valid_uuid(uuid):
+    id_schema = {"type" : "string", "pattern": "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$"}
+    id_schema2 = {"type" : "string", "pattern": "^[a-fA-F0-9]{32}$"}
+    try:
+        js_v(uuid, id_schema)
+        return True
+    except js_e.ValidationError:
+        try:
+            js_v(uuid, id_schema2)
+            return True
+        except js_e.ValidationError:
+            return False
+    return False
+
+def _convert_datetime2str(var):
+    '''Converts a datetime variable to a string with the format '%Y-%m-%dT%H:%M:%S'
+    It enters recursively in the dict var finding this kind of variables
+    Conversion happens in place; the return value is not meaningful
+    (True for an empty dict, None otherwise) and should be ignored.
+    '''
+    if type(var) is dict:
+        for k,v in var.items():
+            if type(v) is datetime.datetime:
+                var[k]= v.strftime('%Y-%m-%dT%H:%M:%S')
+            elif type(v) is dict or type(v) is list or type(v) is tuple:
+                _convert_datetime2str(v)
+        if len(var) == 0: return True
+    elif type(var) is list or type(var) is tuple:
+        for v in var:
+            _convert_datetime2str(v)
+
+def _convert_bandwidth(data, reverse=False, logger=None):
+    '''Check the field bandwidth recursivelly and when found, it removes units and convert to number
+    It assumes that bandwidth is well formed
+    Attributes:
+        'data': dictionary bottle.FormsDict variable to be checked. None or empty is consideted valid
+        'reverse': by default convert form str to int (Mbps), if True it convert from number to units
+    Return:
+        None
+    '''
+    if type(data) is dict:
+        for k in data.keys():
+            if type(data[k]) is dict or type(data[k]) is tuple or type(data[k]) is list:
+                _convert_bandwidth(data[k], reverse, logger)
+        if "bandwidth" in data:
+            try:
+                value=str(data["bandwidth"])
+                if not reverse:
+                    pos = value.find("bps")
+                    if pos>0:
+                        if value[pos-1]=="G": data["bandwidth"] =  int(data["bandwidth"][:pos-1]) * 1000
+                        elif value[pos-1]=="k": data["bandwidth"]= int(data["bandwidth"][:pos-1]) // 1000
+                        else: data["bandwidth"]= int(data["bandwidth"][:pos-1])
+                else:
+                    value = int(data["bandwidth"])
+                    if value % 1000 == 0:
+                        data["bandwidth"] = str(value // 1000) + " Gbps"
+                    else:
+                        data["bandwidth"] = str(value) + " Mbps"
+            except:
+                if logger:
+                    logger.error("convert_bandwidth exception for type '%s' data '%s'", type(data["bandwidth"]), data["bandwidth"])
+                return
+    if type(data) is tuple or type(data) is list:
+        for k in data:
+            if type(k) is dict or type(k) is tuple or type(k) is list:
+                _convert_bandwidth(k, reverse, logger)
+
+def _convert_str2boolean(data, items):
+    '''Check recursively the content of data, and if there is an key contained in items, convert value from string to boolean
+    Done recursively
+    Attributes:
+        'data': dictionary variable to be checked. None or empty is considered valid
+        'items': tuple of keys to convert
+    Return:
+        None
+    '''
+    if type(data) is dict:
+        for k in data.keys():
+            if type(data[k]) is dict or type(data[k]) is tuple or type(data[k]) is list:
+                _convert_str2boolean(data[k], items)
+            if k in items:
+                if type(data[k]) is str:
+                    if   data[k]=="false" or data[k]=="False" or data[k]=="0": data[k]=False
+                    elif data[k]=="true"  or data[k]=="True" or data[k]=="1":  data[k]=True
+                elif type(data[k]) is int:
+                    if   data[k]==0: data[k]=False
+                    elif  data[k]==1:  data[k]=True
+    if type(data) is tuple or type(data) is list:
+        for k in data:
+            if type(k) is dict or type(k) is tuple or type(k) is list:
+                _convert_str2boolean(k, items)
+
+class db_base_Exception(httperrors.HttpMappedError):
+    '''Common Exception for all database exceptions.
+
+    Carries an HTTP status code (``http_code``) so callers at the REST layer
+    can map database failures straight to a response, Bad_Request by default.
+    '''
+
+    def __init__(self, message, http_code=httperrors.Bad_Request):
+        super(db_base_Exception, self).__init__(message, http_code)
+
+class db_base():
+    tables_with_created_field=()
+
+    def __init__(self, host=None, user=None, passwd=None, database=None,
+                 log_name='db', log_level=None, lock=None):
+        """Store connection parameters; no connection is opened here
+        (call ``connect`` for that).
+
+        :param host/user/passwd/database: MySQL connection parameters;
+            may also be supplied later to ``connect``
+        :param log_name: name of the logger to use
+        :param log_level: if set, overrides the logger's level (string such
+            as 'DEBUG'; must be a valid ``logging`` level name)
+        :param lock: threading.Lock shared by users of this connection;
+            a fresh one is created when not provided
+        """
+        self.host = host
+        self.user = user
+        self.passwd = passwd
+        self.database = database
+        self.con = None  # MySQLdb connection, set by connect()
+        self.log_level=log_level
+        self.logger = logging.getLogger(log_name)
+        if self.log_level:
+            self.logger.setLevel( getattr(logging, log_level) )
+        self.lock = lock or Lock()
+
+    def connect(self, host=None, user=None, passwd=None, database=None):
+        '''Connect to specific data base.
+        The first time a valid host, user, passwd and database must be provided,
+        Following calls can skip this parameters
+        Raises db_base_Exception (Unauthorized) if the connection fails.
+        '''
+        try:
+            # remember any newly supplied parameter for future reconnects
+            if host:        self.host = host
+            if user:        self.user = user
+            if passwd:      self.passwd = passwd
+            if database:    self.database = database
+
+            self.con = mdb.connect(self.host, self.user, self.passwd, self.database)
+            self.logger.debug("DB: connected to '%s' at '%s@%s'", self.database, self.user, self.host)
+        except mdb.Error as e:
+            raise db_base_Exception("Cannot connect to DataBase '{}' at '{}@{}' Error {}: {}".format(
+                                    self.database, self.user, self.host, e.args[0], e.args[1]),
+                                    http_code = httperrors.Unauthorized )
+
+    def escape(self, value):
+        """Escape ``value`` via the underlying connection (adds quoting);
+        requires ``connect`` to have been called first."""
+        return self.con.escape(value)
+
+    def escape_string(self, value):
+        """Escape a string via the underlying connection WITHOUT adding
+        surrounding quotes; requires ``connect`` to have been called first."""
+        return self.con.escape_string(value)
+
+    @retry
+    @with_transaction
+    def get_db_version(self):
+        ''' Obtain the database schema version.
+        Return: (negative, text) if error or version 0.0 where schema_version table is missing
+                (version_int, version_text) if ok
+        '''
+        cmd = "SELECT version_int,version FROM schema_version"
+        self.logger.debug(cmd)
+        self.cur.execute(cmd)
+        rows = self.cur.fetchall()
+        highest_version_int=0
+        highest_version=""
+        for row in rows: #look for the latest version
+            # rows are not guaranteed ordered, so scan for the maximum
+            if row[0]>highest_version_int:
+                highest_version_int, highest_version = row[0:2]
+        return highest_version_int, highest_version
+
+    def disconnect(self):
+        '''disconnect from specific data base'''
+        try:
+            self.con.close()
+            self.con = None
+        except mdb.Error as e:
+            self.logger.error("while disconnecting from DB: Error %d: %s",e.args[0], e.args[1])
+            return
+        except AttributeError as e: #self.con not defined
+            if e[0][-5:] == "'con'":
+                self.logger.warning("while disconnecting from DB: Error %d: %s",e.args[0], e.args[1])
+                return
+            else:
+                raise
+
+    def reconnect(self):
+        """Try to gracefully to the database in case of error"""
+        try:
+            self.con.ping(True)  # auto-reconnect if the server is available
+        except:
+            # The server is probably not available...
+            # Let's wait a bit
+            time.sleep(RECOVERY_TIME)
+            self.con = None
+            self.connect()
+
+    def fork_connection(self):
+        """Return a new database object, with a separated connection to the
+        database (and lock), so it can act independently
+        """
+        obj =  self.__class__(
+            host=self.host,
+            user=self.user,
+            passwd=self.passwd,
+            database=self.database,
+            log_name=self.logger.name,
+            log_level=self.log_level,
+            lock=Lock()
+        )
+
+        obj.connect()
+
+        return obj
+
+    @contextmanager
+    def transaction(self, cursor_type=None):
+        """DB changes that are executed inside this context will be
+        automatically rolled back in case of error.
+
+        This implementation also adds a lock, so threads sharing the same
+        connection object are synchronized.
+
+        Arguments:
+            cursor_type: default: MySQLdb.cursors.DictCursor
+
+        Yields:
+            Cursor object (also stored in ``self.cur`` for the duration)
+
+        References:
+            https://www.oreilly.com/library/view/mysql-cookbook-2nd/059652708X/ch15s08.html
+            https://github.com/PyMySQL/mysqlclient-python/commit/c64915b1e5c705f4fb10e86db5dcfed0b58552cc
+        """
+        # Previously MySQLdb had built-in support for that using the context
+        # API for the connection object.
+        # This support was removed in version 1.40
+        # https://github.com/PyMySQL/mysqlclient-python/blob/master/HISTORY.rst#whats-new-in-140
+        with self.lock:
+            try:
+                # only start an explicit transaction when autocommit is on;
+                # otherwise one is already implicitly open
+                if self.con.get_autocommit():
+                    self.con.query("BEGIN")
+
+                self.cur = self.con.cursor(cursor_type)
+                yield self.cur
+            except:  # noqa
+                # any exception (including non-Exception ones) undoes the work
+                self.con.rollback()
+                raise
+            else:
+                self.con.commit()
+
+
+    def _format_error(self, e, tries=1, command=None,
+                      extra=None, table=None, cmd=None, **_):
+        '''Creates a text error base on the produced exception
+            Params:
+                e: mdb exception
+                retry: in case of timeout, if reconnecting to database and retry, or raise and exception
+                cmd: database command that produce the exception
+                command: if the intention is update or delete
+                extra: extra information to add to some commands
+            Return
+                HTTP error in negative, formatted error text
+        '''  # the **_ ignores extra kwargs
+        table_info = ' (table `{}`)'.format(table) if table else ''
+        if cmd:
+            self.logger.debug("Exception '%s' with command '%s'%s", e, cmd, table_info)
+
+        if isinstance(e,AttributeError ):
+            self.logger.debug(str(e), exc_info=True)
+            raise db_base_Exception("DB Exception " + str(e), httperrors.Internal_Server_Error)
+        if e.args[0]==2006 or e.args[0]==2013 : #MySQL server has gone away (((or)))    Exception 2013: Lost connection to MySQL server during query
+            # Let's aways reconnect if the connection is lost
+            # so future calls are not affected.
+            self.reconnect()
+
+            if tries > 1:
+                self.logger.warning("DB Exception '%s'. Retry", str(e))
+                return
+            else:
+                raise db_base_Exception("Database connection timeout Try Again", httperrors.Request_Timeout)
+
+        fk=e.args[1].find("foreign key constraint fails")
+        if fk>=0:
+            if command=="update":
+                raise db_base_Exception("tenant_id '{}' not found.".format(extra), httperrors.Not_Found)
+            elif command=="delete":
+                raise db_base_Exception("Resource is not free. There are {} that prevent deleting it.".format(extra), httperrors.Conflict)
+        de = e.args[1].find("Duplicate entry")
+        fk = e.args[1].find("for key")
+        uk = e.args[1].find("Unknown column")
+        wc = e.args[1].find("in 'where clause'")
+        fl = e.args[1].find("in 'field list'")
+        #print de, fk, uk, wc,fl
+        if de>=0:
+            if fk>=0: #error 1062
+                raise db_base_Exception(
+                    "Value {} already in use for {}{}".format(
+                        e.args[1][de+15:fk], e.args[1][fk+7:], table_info),
+                    httperrors.Conflict)
+        if uk>=0:
+            if wc>=0:
+                raise db_base_Exception(
+                    "Field {} can not be used for filtering{}".format(
+                        e.args[1][uk+14:wc], table_info),
+                    httperrors.Bad_Request)
+            if fl>=0:
+                raise db_base_Exception(
+                    "Field {} does not exist{}".format(
+                        e.args[1][uk+14:wc], table_info),
+                    httperrors.Bad_Request)
+        raise db_base_Exception(
+                "Database internal Error{} {}: {}".format(
+                    table_info, e.args[0], e.args[1]),
+                httperrors.Internal_Server_Error)
+
+    def __str2db_format(self, data):
+        """Convert string data to database format.
+        If data is None it returns the 'Null' text,
+        otherwise it returns the text surrounded by quotes ensuring internal quotes are escaped.
+        """
+        if data is None:
+            return 'Null'
+        elif isinstance(data[1], str):
+            return json.dumps(data)
+        else:
+            return json.dumps(str(data))
+
+    def __tuple2db_format_set(self, data):
+        """Compose the needed text for a SQL SET, parameter 'data' is a pair tuple (A,B),
+        and it returns the text 'A="B"', where A is a field of a table and B is the value
+        If B is None it returns the 'A=Null' text, without surrounding Null by quotes
+        If B is not None it returns the text "A='B'" or 'A="B"' where B is surrounded by quotes,
+        and it ensures internal quotes of B are escaped.
+        B can be also a dict with special keys:
+            {"INCREMENT": NUMBER}, then it produce "A=A+NUMBER"
+        Raises db_base_Exception (Bad_Request) for any other dict value.
+        """
+        if data[1] is None:
+            return str(data[0]) + "=Null"
+        elif isinstance(data[1], str):
+            # json.dumps doubles as an escaper/quoter for the SQL literal
+            return str(data[0]) + '=' + json.dumps(data[1])
+        elif isinstance(data[1], dict):
+            if "INCREMENT" in data[1]:
+                # {:+d} always emits a sign, producing "A=A+N" or "A=A-N"
+                return "{A}={A}{N:+d}".format(A=data[0], N=data[1]["INCREMENT"])
+            raise db_base_Exception("Format error for UPDATE field: {!r}".format(data[0]))
+        else:
+            return str(data[0]) + '=' + json.dumps(str(data[1]))
+
+    def __create_where(self, data, use_or=None):
+        """
+        Compose the needed text for a SQL WHERE, parameter 'data' can be a dict or a list of dict. By default lists are
+        concatenated with OR and dict with AND, unless parameter 'use_or' indicates other thing.
+        If a dict it will generate 'key1="value1" AND key2="value2" AND ...'.
+            If value is None, it will produce 'key is null'
+            If value is a list or tuple, it will produce 'key="value[0]" OR key="value[1]" OR ...'
+            keys can be suffixed by >,<,<>,>=,<=,' LIKE ' so that this is used to compare key and value instead of "="
+        The special keys "OR", "AND" with a dict value is used to create a nested WHERE
+        If a list, each item will be a dictionary that will be concatenated with OR by default
+        :param data: dict or list of dicts
+        :param use_or: Can be None (use default behaviour), True (use OR) or False (use AND)
+        :return: a string with the content to send to mysql
+        :raises db_base_Exception: when data is neither dict nor list/tuple
+        """
+        cmd = []
+        if isinstance(data, dict):
+            for k, v in data.items():
+                # "OR"/"AND" keys recurse with the matching connector
+                if k == "OR":
+                    cmd.append("(" + self.__create_where(v, use_or=True) + ")")
+                    continue
+                elif k == "AND":
+                    cmd.append("(" + self.__create_where(v, use_or=False) + ")")
+                    continue
+
+                # append "=" unless the key already carries an operator suffix
+                if k.endswith(">") or k.endswith("<") or k.endswith("=") or k.endswith(" LIKE "):
+                    pass
+                else:
+                    k += "="
+
+                if v is None:
+                    # "k=" -> "k is Null"; "k<>" -> "k is not Null"
+                    cmd.append(k.replace("=", " is").replace("<>", " is not") + " Null")
+                elif isinstance(v, (tuple, list)):
+                    # several candidate values for the same key -> OR group
+                    cmd2 = []
+                    for v2 in v:
+                        if v2 is None:
+                            cmd2.append(k.replace("=", " is").replace("<>", " is not") + " Null")
+                        elif isinstance(v2, str):
+                            cmd2.append(k + json.dumps(v2))
+                        else:
+                            cmd2.append(k + json.dumps(str(v2)))
+                    cmd.append("(" + " OR ".join(cmd2) + ")")
+                elif isinstance(v, str):
+                    cmd.append(k + json.dumps(v))
+                else:
+                    cmd.append(k + json.dumps(str(v)))
+        elif isinstance(data, (tuple, list)):
+            if use_or is None:
+                use_or = True
+            for k in data:
+                cmd.append("(" + self.__create_where(k) + ")")
+        else:
+            raise db_base_Exception("invalid WHERE clause at '{}'".format(data))
+        if use_or:
+            return " OR ".join(cmd)
+        return " AND ".join(cmd)
+
+    def __remove_quotes(self, data):
+        '''remove single quotes ' of any string content of data dictionary'''
+        for k,v in data.items():
+            if type(v) == str:
+                if "'" in v:
+                    data[k] = data[k].replace("'","_")
+
+    def _update_rows(self, table, UPDATE, WHERE, modified_time=0):
+        """ Update one or several rows of a table. It DOES NOT manage the
+        transaction; ``self.cur`` must already exist (see ``transaction``).
+        :param UPDATE: dictionary with the changes. dict keys are database columns that will be set with the dict values
+        :param table: database table to update
+        :param WHERE: dict or list of dicts to compose the SQL WHERE clause.
+            If a dict it will generate 'key1="value1" AND key2="value2" AND ...'.
+                If value is None, it will produce 'key is null'
+                If value is a list or tuple, it will produce 'key="value[0]" OR key="value[1]" OR ...'
+                keys can be suffixed by >,<,<>,>=,<= so that this is used to compare key and value instead of "="
+                The special keys "OR", "AND" with a dict value is used to create a nested WHERE
+            If a list, each item will be a dictionary that will be concatenated with OR
+        :param modified_time: when truthy, also sets the modified_at column
+        :return: the number of updated rows, raises exception upon error
+        """
+        values = ",".join(map(self.__tuple2db_format_set, UPDATE.items() ))
+        if modified_time:
+            values += ",modified_at={:f}".format(modified_time)
+        cmd= "UPDATE " + table + " SET " + values + " WHERE " + self.__create_where(WHERE)
+        self.logger.debug(cmd)
+        self.cur.execute(cmd)
+        return self.cur.rowcount
+
+    def _new_uuid(self, root_uuid=None, used_table=None, created_time=0):
+        """
+        Generate a new uuid and register it in the 'uuids' table.
+        It DOES NOT begin or end the transaction, so self.con.cursor must be created
+        :param root_uuid: master uuid of the transaction; defaults to the new
+            uuid itself (this uuid becomes its own root)
+        :param used_table: the table this uuid is intended for
+        :param created_time: time of creation; current time when falsy
+        :return: the created uuid
+        """
+
+        uuid = str(myUuid.uuid1())
+        # defining root_uuid if not provided
+        if root_uuid is None:
+            root_uuid = uuid
+        if created_time:
+            created_at = created_time
+        else:
+            created_at = time.time()
+        # inserting new uuid
+        cmd = "INSERT INTO uuids (uuid, root_uuid, used_at, created_at) VALUES ('{:s}','{:s}','{:s}', {:f})".format(
+            uuid, root_uuid, used_table, created_at)
+        self.logger.debug(cmd)
+        self.cur.execute(cmd)
+        return uuid
+
+    def _new_row_internal(self, table, INSERT, add_uuid=False, root_uuid=None, created_time=0, confidential_data=False):
+        ''' Add one row into a table. It DOES NOT begin or end the transaction, so self.con.cursor must be created
+        Attribute
+            INSERT: dictionary with the key:value to insert
+            table: table where to insert
+            add_uuid: if True, it will create an uuid key entry at INSERT if not provided
+            created_time: time to add to the created_at column
+        It checks presence of uuid and add one automatically otherwise
+        Return: uuid
+        '''
+
+        if add_uuid:
+            #create uuid if not provided
+            if 'uuid' not in INSERT:
+                uuid = INSERT['uuid'] = str(myUuid.uuid1()) # create_uuid
+            else:
+                uuid = str(INSERT['uuid'])
+        else:
+            uuid=None
+        if add_uuid:
+            #defining root_uuid if not provided
+            if root_uuid is None:
+                root_uuid = uuid
+            if created_time:
+                created_at = created_time
+            else:
+                created_at=time.time()
+            #inserting new uuid
+            cmd = "INSERT INTO uuids (uuid, root_uuid, used_at, created_at) VALUES ('{:s}','{:s}','{:s}', {:f})".format(uuid, root_uuid, table, created_at)
+            self.logger.debug(cmd)
+            self.cur.execute(cmd)
+        #insertion
+        cmd= "INSERT INTO " + table +" SET " + \
+            ",".join(map(self.__tuple2db_format_set, INSERT.items() ))
+        if created_time:
+            cmd += ",created_at={time:.9f},modified_at={time:.9f}".format(time=created_time)
+        if confidential_data:
+            index = cmd.find("SET")
+            subcmd = cmd[:index] + 'SET...'
+            self.logger.debug(subcmd)
+        else:
+            self.logger.debug(cmd)
+        self.cur.execute(cmd)
+        self.cur.rowcount
+        return uuid
+
+    def _get_rows(self,table,uuid):
+        cmd = "SELECT * FROM {} WHERE uuid='{}'".format(str(table), str(uuid))
+        self.logger.debug(cmd)
+        self.cur.execute(cmd)
+        rows = self.cur.fetchall()
+        return rows
+
+    @retry
+    @with_transaction
+    def new_row(self, table, INSERT, add_uuid=False, created_time=0, confidential_data=False):
+        ''' Add one row into a table (public, transaction-wrapped entry point).
+        Attribute
+            INSERT: dictionary with the key: value to insert
+            table: table where to insert
+            add_uuid: if True, it will create an uuid key entry at INSERT if not provided
+            created_time: creation timestamp; auto-filled for tables listed in
+                ``tables_with_created_field`` when 0
+            confidential_data: if True the values are not logged
+        It checks presence of uuid and add one automatically otherwise
+        Return: uuid
+        '''
+        if table in self.tables_with_created_field and created_time==0:
+            created_time=time.time()
+        return self._new_row_internal(table, INSERT, add_uuid, None, created_time, confidential_data)
+
+    @retry
+    @with_transaction
+    def update_rows(self, table, UPDATE, WHERE, modified_time=None, attempt=_ATTEMPT):
+        """ Update one or several rows of a table (public, transaction-wrapped).
+        :param UPDATE: dictionary with the changes. dict keys are database columns that will be set with the dict values
+        :param table: database table to update
+        :param WHERE: dict or list of dicts to compose the SQL WHERE clause.
+            If a dict it will generate 'key1="value1" AND key2="value2" AND ...'.
+                If value is None, it will produce 'key is null'
+                If value is a list or tuple, it will produce 'key="value[0]" OR key="value[1]" OR ...'
+                keys can be suffixed by >,<,<>,>=,<= so that this is used to compare key and value instead of "="
+                The special keys "OR", "AND" with a dict value is used to create a nested WHERE
+            If a list, each item will be a dictionary that will be concatenated with OR
+        :param modified_time: Can contain the time to be set to the table row.
+            None to set automatically, 0 to do not modify it
+        :param attempt: retry bookkeeping injected by the ``retry`` decorator
+        :return: the number of updated rows, raises exception upon error
+        """
+        # auto-stamp modified_at only for tables known to carry that column
+        if table in self.tables_with_created_field and modified_time is None:
+            modified_time = time.time()
+
+        return self._update_rows(table, UPDATE, WHERE, modified_time)
+
+    def _delete_row_by_id_internal(self, table, uuid):
+        """Delete the row of ``table`` with the given uuid and its entry in
+        the 'uuids' registry. Requires an open cursor; returns the number of
+        rows deleted from ``table`` (not counting the uuids registry)."""
+        cmd = "DELETE FROM {} WHERE uuid = '{}'".format(table, uuid)
+        self.logger.debug(cmd)
+        self.cur.execute(cmd)
+        deleted = self.cur.rowcount
+        # delete uuid from the registry; a fresh cursor is obtained so the
+        # rowcount captured above is not disturbed
+        self.cur = self.con.cursor()
+        cmd = "DELETE FROM uuids WHERE root_uuid = '{}'".format(uuid)
+        self.logger.debug(cmd)
+        self.cur.execute(cmd)
+        return deleted
+
+    @retry(command='delete', extra='dependencies')
+    @with_transaction
+    def delete_row_by_id(self, table, uuid):
+        """Public, transaction-wrapped deletion of one row (and its uuid
+        registry entry) by uuid; returns the number of deleted rows."""
+        return self._delete_row_by_id_internal(table, uuid)
+
+    @retry
+    def delete_row(self, attempt=_ATTEMPT, **sql_dict):
+        """ Deletes rows from a table.
+        :param FROM: string with table name (Mandatory)
+        :param WHERE: dict or list of dicts to compose the SQL WHERE clause. (Optional)
+            If a dict it will generate 'key1="value1" AND key2="value2" AND ...'.
+                If value is None, it will produce 'key is null'
+                If value is a list or tuple, it will produce 'key="value[0]" OR key="value[1]" OR ...'
+                keys can be suffixed by >,<,<>,>=,<= so that this is used to compare key and value instead of "="
+                The special keys "OR", "AND" with a dict value is used to create a nested WHERE
+            If a list, each item will be a dictionary that will be concatenated with OR
+        :param LIMIT: maximum number of rows to delete (Optional)
+        :param attempt: retry bookkeeping injected by the ``retry`` decorator
+        :return: the number of deleted rows, raises exception upon error
+        """
+        cmd = "DELETE FROM " + str(sql_dict['FROM'])
+        if sql_dict.get('WHERE'):
+            cmd += " WHERE " + self.__create_where(sql_dict['WHERE'])
+        if sql_dict.get('LIMIT'):
+            cmd += " LIMIT " + str(sql_dict['LIMIT'])
+
+        # record the command so retry/_format_error can log it
+        attempt.info['cmd'] = cmd
+
+        with self.transaction():
+            self.logger.debug(cmd)
+            self.cur.execute(cmd)
+            deleted = self.cur.rowcount
+        return deleted
+
+    @retry
+    @with_transaction(cursor='dict')
+    def get_rows_by_id(self, table, uuid, attempt=_ATTEMPT):
+        '''Get rows from a table based on uuid.
+        Returns a list of dicts (DictCursor), one per matching row;
+        empty list when no row matches.
+        '''
+        cmd="SELECT * FROM {} where uuid='{}'".format(str(table), str(uuid))
+        attempt.info['cmd'] = cmd
+        self.logger.debug(cmd)
+        self.cur.execute(cmd)
+        rows = self.cur.fetchall()
+        return rows
+
+    @retry
+    def get_rows(self, attempt=_ATTEMPT, **sql_dict):
+        """ Obtain rows from a table.
+        :param SELECT: list or tuple of fields to retrieve (by default all)
+        :param FROM: string with table name (Mandatory)
+        :param WHERE: dict or list of dicts to compose the SQL WHERE clause. (Optional)
+            If a dict it will generate 'key1="value1" AND key2="value2" AND ...'.
+                If value is None, it will produce 'key is null'
+                If value is a list or tuple, it will produce 'key="value[0]" OR key="value[1]" OR ...'
+                keys can be suffixed by >,<,<>,>=,<= so that this is used to compare key and value instead of "="
+                The special keys "OR", "AND" with a dict value is used to create a nested WHERE
+            If a list, each item will be a dictionary that will be concatenated with OR
+        :param LIMIT: limit the number of obtained entries (Optional)
+        :param ORDER_BY:  list or tuple of fields to order, add ' DESC' to each item if inverse order is required
+        :param attempt: retry bookkeeping injected by the ``retry`` decorator
+        :return: a list with dictionaries at each row, raises exception upon error
+        """
+        # build the SELECT clause
+        cmd = "SELECT "
+        if 'SELECT' in sql_dict:
+            if isinstance(sql_dict['SELECT'], (tuple, list)):
+                cmd += ",".join(map(str, sql_dict['SELECT']))
+            else:
+                cmd += sql_dict['SELECT']
+        else:
+            cmd += "*"
+
+        cmd += " FROM " + str(sql_dict['FROM'])
+        if sql_dict.get('WHERE'):
+            cmd += " WHERE " + self.__create_where(sql_dict['WHERE'])
+
+        if 'ORDER_BY' in sql_dict:
+            cmd += " ORDER BY "
+            if isinstance(sql_dict['ORDER_BY'], (tuple, list)):
+                cmd += ",".join(map(str, sql_dict['ORDER_BY']))
+            else:
+                cmd += str(sql_dict['ORDER_BY'])
+
+        if 'LIMIT' in sql_dict:
+            cmd += " LIMIT " + str(sql_dict['LIMIT'])
+
+        # record the command so retry/_format_error can log it
+        attempt.info['cmd'] = cmd
+
+        with self.transaction(mdb.cursors.DictCursor):
+            self.logger.debug(cmd)
+            self.cur.execute(cmd)
+            rows = self.cur.fetchall()
+            return rows
+
+    @retry
+    def get_table_by_uuid_name(self, table, uuid_name, error_item_text=None, allow_several=False, WHERE_OR={}, WHERE_AND_OR="OR", attempt=_ATTEMPT):
+        ''' Obtain one row from a table based on name or uuid.
+        Attribute:
+            table: string of table name
+            uuid_name: name or uuid. If not uuid format is found, it is considered a name
+            allow_several: if False return ERROR if more than one row are found
+            error_item_text: in case of error it identifies the 'item' name for a proper output text
+            WHERE_OR: dict of key:values, translated to "key=value OR ..." (Optional)
+            WHERE_AND_OR: str 'AND' or 'OR' (by default). Marks the precedence:
+                "WHERE ... AND (WHERE_OR)" versus "(WHERE ...) OR WHERE_OR" (Optional)
+        Return: if allow_several==False, a dictionary with this row, or error if no item is found or more than one is found
+                if allow_several==True, a list of dictionaries with the row or rows, error if no item is found
+        '''
+
+        # NOTE(review): WHERE_OR={} is a mutable default argument; it is only
+        # read here, so harmless today, but a None default would be safer.
+        if error_item_text==None:
+            error_item_text = table
+        # decide whether the caller passed a uuid or a plain name
+        what = 'uuid' if af.check_valid_uuid(uuid_name) else 'name'
+        cmd = " SELECT * FROM {} WHERE {}='{}'".format(table, what, uuid_name)
+        if WHERE_OR:
+            where_or = self.__create_where(WHERE_OR, use_or=True)
+            if WHERE_AND_OR == "AND":
+                cmd += " AND (" + where_or + ")"
+            else:
+                cmd += " OR " + where_or
+
+        attempt.info['cmd'] = cmd   # exposed so @retry can log the failing SQL
+
+        with self.transaction(mdb.cursors.DictCursor):
+            self.logger.debug(cmd)
+            self.cur.execute(cmd)
+            number = self.cur.rowcount
+            if number == 0:
+                raise db_base_Exception("No {} found with {} '{}'".format(error_item_text, what, uuid_name), http_code=httperrors.Not_Found)
+            elif number > 1 and not allow_several:
+                raise db_base_Exception("More than one {} found with {} '{}'".format(error_item_text, what, uuid_name), http_code=httperrors.Conflict)
+            if allow_several:
+                rows = self.cur.fetchall()
+            else:
+                rows = self.cur.fetchone()
+            return rows
+
+    @retry(table='uuids')
+    @with_transaction(cursor='dict')
+    def get_uuid(self, uuid):
+        '''Check in the database if this uuid is already present.
+        Returns a tuple (number_of_matches, rows) from the 'uuids' table.
+        '''
+        self.cur.execute("SELECT * FROM uuids where uuid='" + str(uuid) + "'")
+        rows = self.cur.fetchall()
+        return self.cur.rowcount, rows
+
+    @retry
+    @with_transaction(cursor='dict')
+    def get_uuid_from_name(self, table, name):
+        '''Searchs in table the name and returns the uuid
+        Returns a tuple (rowcount, result): result is the uuid when exactly one
+        row matches, otherwise an error text (rowcount 0 => not found,
+        rowcount > 1 => ambiguous name).
+        '''
+        # NOTE(review): 'name' is concatenated directly into the SQL text; a
+        # quote character inside it would break the statement (no escaping).
+        where_text = "name='" + name +"'"
+        self.cur.execute("SELECT * FROM " + table + " WHERE "+ where_text)
+        rows = self.cur.fetchall()
+        if self.cur.rowcount==0:
+            return 0, "Name {} not found in table {}".format(name, table)
+        elif self.cur.rowcount>1:
+            # NOTE(review): message says "VNF" although the method is generic
+            return self.cur.rowcount, "More than one VNF with name {} found in table {}".format(name, table)
+        return self.cur.rowcount, rows[0]["uuid"]
diff --git a/RO/osm_ro/http_tools/__init__.py b/RO/osm_ro/http_tools/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/RO/osm_ro/http_tools/errors.py b/RO/osm_ro/http_tools/errors.py
new file mode 100644 (file)
index 0000000..552e85b
--- /dev/null
@@ -0,0 +1,67 @@
+# -*- coding: utf-8 -*-
+import logging
+from functools import wraps
+
+import bottle
+import yaml
+
+# Symbolic names for the HTTP status codes used across the RO HTTP layer
+Bad_Request = 400
+Unauthorized = 401
+Not_Found = 404
+Forbidden = 403
+Method_Not_Allowed = 405
+Not_Acceptable = 406
+Request_Timeout = 408
+Conflict = 409
+Service_Unavailable = 503
+Internal_Server_Error = 500
+
+
+class HttpMappedError(Exception):
+    """Base class for a new hierarchy that translate HTTP error codes
+    to python exceptions
+
+    This class accepts an extra argument ``http_code`` (integer
+    representing HTTP error codes).
+    """
+
+    def __init__(self, message, http_code=Internal_Server_Error):
+        """:param message: human readable error text
+        :param http_code: HTTP status to report (500 by default)"""
+        Exception.__init__(self, message)
+        self.http_code = http_code
+
+
+class ErrorHandler(object):
+    """Defines a default strategy for handling HttpMappedError.
+
+    This class implements a wrapper (can also be used as decorator), that
+    watches out for different exceptions and log them accordingly.
+
+    Arguments:
+        logger(logging.Logger): logger object to be used to report errors
+    """
+    def __init__(self, logger=None):
+        self.logger = logger or logging.getLogger('openmano.http')
+
+    def __call__(self, function):
+        """Wrap ``function`` so exceptions are logged and turned into
+        ``bottle.abort`` HTTP responses."""
+        @wraps(function)
+        def _wraped(*args, **kwargs):
+            try:
+                return function(*args, **kwargs)
+            except bottle.HTTPError:
+                # already a bottle HTTP response: propagate it untouched
+                raise
+            except HttpMappedError as ex:
+                # application error carrying its own HTTP status code
+                self.logger.error(
+                    "%s error %s",
+                    function.__name__, ex.http_code, exc_info=True)
+                bottle.abort(ex.http_code, str(ex))
+            except yaml.YAMLError as ex:
+                self.logger.error(
+                    "YAML error while trying to serialize/unserialize fields",
+                    exc_info=True)
+                bottle.abort(Bad_Request, type(ex).__name__ + ": " + str(ex))
+            except Exception as ex:
+                # catch-all boundary: report anything else as a 500
+                self.logger.error("Unexpected exception: ", exc_info=True)
+                bottle.abort(Internal_Server_Error,
+                             type(ex).__name__ + ": " + str(ex))
+
+        return _wraped
diff --git a/RO/osm_ro/http_tools/handler.py b/RO/osm_ro/http_tools/handler.py
new file mode 100644 (file)
index 0000000..49249a8
--- /dev/null
@@ -0,0 +1,114 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+
+from types import MethodType
+
+from bottle import Bottle
+
+
+class route(object):
+    """Decorator that stores route information, so creating the routes can be
+    postponed.
+
+    This allows methods (OOP) with bottle.
+
+    Arguments:
+        method: HTTP verb (e.g. ``'get'``, ``'post'``, ``'put'``, ...)
+        path: URL path that will be handled by the callback
+    """
+    def __init__(self, method, path, **kwargs):
+        # normalize the verb and keep (path, kwargs) for later registration
+        kwargs['method'] = method.upper()
+        self.route_info = (path, kwargs)
+
+    def __call__(self, function):
+        # only annotate the function; BaseHandler.wsgi_app does the wiring
+        function.route_info = self.route_info
+        return function
+
+
+class BaseHandler(object):
+    """Base class that allows isolated webapp implementation using Bottle,
+    when used in conjunction with the ``route`` decorator.
+
+    In this context, a ``Handler`` is meant to be a collection of Bottle
+    routes/callbacks related to a specific topic.
+
+    A ``Handler`` instance can produce a WSGI app that can be mounted or merged
+    inside another more general bottle app.
+
+    Example:
+
+        from http_tools.handler import Handler, route
+        from http_tools.errors import ErrorHandler
+
+        class MyHandler(Handler):
+            plugins = [ErrorHandler()]
+            url_base = '/my/url/base'
+
+            @route('GET', '/some/path/<var>')
+            def get_var(self, var):
+                return var
+
+        app = MyHandler.wsgi_app
+        # ^  Webapp with a `GET /my/url/base/some/path/<var>` route
+    """
+    # cache for the lazily-built Bottle application
+    _wsgi_app = None
+
+    url_base = ''
+    """String representing a path fragment to be prepended to the routes"""
+
+    plugins = []
+    """Bottle plugins to be installed when creating the WSGI app"""
+
+    @property
+    def wsgi_app(self):
+        """Create a WSGI app based on the implemented callbacks"""
+
+        if self._wsgi_app:
+            # Return if cached
+            return self._wsgi_app
+
+        app = Bottle()
+
+        # collect every bound method annotated by the ``route`` decorator
+        # ('wsgi_app' itself is excluded to avoid recursing into this property)
+        members = (getattr(self, m) for m in dir(self) if m != 'wsgi_app')
+        callbacks = (m for m in members
+                     if isinstance(m, MethodType) and hasattr(m, 'route_info'))
+
+        for callback in callbacks:
+            path, kwargs = callback.route_info
+            kwargs.update(callback=callback, apply=self.plugins)
+            app.route(self.url_base + path, **kwargs)
+
+        self._wsgi_app = app
+
+        return app
diff --git a/RO/osm_ro/http_tools/request_processing.py b/RO/osm_ro/http_tools/request_processing.py
new file mode 100644 (file)
index 0000000..13e19ed
--- /dev/null
@@ -0,0 +1,210 @@
+# -*- coding: utf-8 -*-
+
+#
+# Util functions previously in `httpserver`
+#
+
+__author__ = "Alfonso Tierno, Gerardo Garcia"
+
+import json
+import logging
+
+import bottle
+import yaml
+from jsonschema import exceptions as js_e
+from jsonschema import validate as js_v
+
+from . import errors as httperrors
+from io import TextIOWrapper
+
+logger = logging.getLogger('openmano.http')
+
+
+def remove_clear_passwd(data):
+    """
+    Removes clear passwords from the data received
+    :param data: data with clear password
+    :return: data without the password information
+    """
+
+    passw = ['password: ', 'passwd: ']
+
+    for pattern in passw:
+        init = data.find(pattern)
+        while init != -1:
+            # mask everything between the pattern and the end of that line
+            # NOTE(review): if the match is on the last line with no trailing
+            # '\n', end == -1 and the last character leaks through -- confirm
+            end = data.find('\n', init)
+            data = data[:init] + '{}******'.format(pattern) + data[end:]
+            # resume the search just after the current occurrence
+            init += 1
+            init = data.find(pattern, init)
+    return data
+
+
+def change_keys_http2db(data, http_db, reverse=False):
+    '''Change keys of dictionary data according to the key_dict values
+    This allow change from http interface names to database names.
+    When reverse is True, the change is otherwise
+    Attributes:
+        data: can be a dictionary or a list
+        http_db: is a dictionary with http names as keys and database names as value
+        reverse: by default change is done from http api to database.
+            If True change is done otherwise.
+    Return: None, but data is modified'''
+    if type(data) is tuple or type(data) is list:
+        # apply recursively to every item of the sequence
+        for d in data:
+            change_keys_http2db(d, http_db, reverse)
+    elif type(data) is dict or type(data) is bottle.FormsDict:
+        if reverse:
+            # database name -> http name
+            for k,v in http_db.items():
+                if v in data: data[k]=data.pop(v)
+        else:
+            # http name -> database name
+            for k,v in http_db.items():
+                if k in data: data[v]=data.pop(k)
+
+
+def format_out(data):
+    '''Return string of dictionary data according to requested json, yaml, xml.
+    By default json
+    '''
+    logger.debug("OUT: " + yaml.safe_dump(data, explicit_start=True, indent=4, default_flow_style=False, tags=False, allow_unicode=True) )
+    # honour the client's Accept header; only yaml and json are supported
+    accept = bottle.request.headers.get('Accept')
+    if accept and 'application/yaml' in accept:
+        bottle.response.content_type='application/yaml'
+        return yaml.safe_dump(
+                data, explicit_start=True, indent=4, default_flow_style=False,
+                tags=False, allow_unicode=True) #, canonical=True, default_style='"'
+    else: #by default json
+        bottle.response.content_type='application/json'
+        #return data #json no style
+        return json.dumps(data, indent=4) + "\n"
+
+
+def format_in(default_schema, version_fields=None, version_dict_schema=None, confidential_data=False):
+    """
+    Parse the content of HTTP request against a json_schema
+
+    :param default_schema: The schema to be parsed by default
+        if no version field is found in the client data.
+        In None no validation is done
+    :param version_fields: If provided it contains a tuple or list with the
+        fields to iterate across the client data to obtain the version
+    :param version_dict_schema: It contains a dictionary with the version as key,
+        and json schema to apply as value.
+        It can contain a None as key, and this is apply
+        if the client data version does not match any key
+    :param confidential_data: when True, clear passwords are masked before logging
+    :return:  user_data, used_schema: if the data is successfully decoded and
+        matches the schema.
+
+    Launch a bottle abort if fails
+    """
+    #print "HEADERS :" + str(bottle.request.headers.items())
+    try:
+        error_text = "Invalid header format "
+        # decode the body according to the Content-Type header (json by default)
+        format_type = bottle.request.headers.get('Content-Type', 'application/json')
+        if 'application/json' in format_type:
+            error_text = "Invalid json format "
+            #Use the json decoder instead of bottle decoder because it informs about the location of error formats with a ValueError exception
+            client_data = json.load(TextIOWrapper(bottle.request.body, encoding="utf-8"))  # TODO py3
+            #client_data = bottle.request.json()
+        elif 'application/yaml' in format_type:
+            error_text = "Invalid yaml format "
+            # SECURITY NOTE(review): yaml.Loader can construct arbitrary python
+            # objects; yaml.SafeLoader would be safer for untrusted request bodies
+            client_data = yaml.load(bottle.request.body, Loader=yaml.Loader)
+        elif 'application/xml' in format_type:
+            bottle.abort(501, "Content-Type: application/xml not supported yet.")
+        else:
+            logger.warning('Content-Type ' + str(format_type) + ' not supported.')
+            bottle.abort(httperrors.Not_Acceptable, 'Content-Type ' + str(format_type) + ' not supported.')
+            return
+        # if client_data == None:
+        #    bottle.abort(httperrors.Bad_Request, "Content error, empty")
+        #    return
+        # log the incoming payload, masking passwords when requested
+        if confidential_data:
+            logger.debug('IN: %s', remove_clear_passwd (yaml.safe_dump(client_data, explicit_start=True, indent=4, default_flow_style=False,
+                                              tags=False, allow_unicode=True)))
+        else:
+            logger.debug('IN: %s', yaml.safe_dump(client_data, explicit_start=True, indent=4, default_flow_style=False,
+                                              tags=False, allow_unicode=True) )
+        # look for the client provider version
+        error_text = "Invalid content "
+        if not default_schema and not version_fields:
+            return client_data, None
+        client_version = None
+        used_schema = None
+        # walk version_fields down the payload to extract the declared version
+        if version_fields != None:
+            client_version = client_data
+            for field in version_fields:
+                if field in client_version:
+                    client_version = client_version[field]
+                else:
+                    client_version=None
+                    break
+        # choose the schema: default, exact version match, or the None fallback
+        if client_version == None:
+            used_schema = default_schema
+        elif version_dict_schema != None:
+            if client_version in version_dict_schema:
+                used_schema = version_dict_schema[client_version]
+            elif None in version_dict_schema:
+                used_schema = version_dict_schema[None]
+        if used_schema==None:
+            bottle.abort(httperrors.Bad_Request, "Invalid schema version or missing version field")
+
+        js_v(client_data, used_schema)
+        return client_data, used_schema
+    except (TypeError, ValueError, yaml.YAMLError) as exc:
+        error_text += str(exc)
+        logger.error(error_text)
+        bottle.abort(httperrors.Bad_Request, error_text)
+    except js_e.ValidationError as exc:
+        logger.error(
+            "validate_in error, jsonschema exception")
+        error_pos = ""
+        if len(exc.path)>0: error_pos=" at " + ":".join(map(json.dumps, exc.path))
+        bottle.abort(httperrors.Bad_Request, error_text + exc.message + error_pos)
+    #except:
+    #    bottle.abort(httperrors.Bad_Request, "Content error: Failed to parse Content-Type",  error_pos)
+    #    raise
+
+def filter_query_string(qs, http2db, allowed):
+    '''Process query string (qs) checking that contains only valid tokens for avoiding SQL injection
+    Attributes:
+        'qs': bottle.FormsDict variable to be processed. None or empty is considered valid
+        'http2db': dictionary with change from http API naming (dictionary key) to database naming(dictionary value)
+        'allowed': list of allowed string tokens (API http naming). All the keys of 'qs' must be one of 'allowed'
+    Return: A tuple with the (select,where,limit) to be use in a database query. All of then transformed to the database naming
+        select: list of items to retrieve, filtered by query string 'field=token'. If no 'field' is present, allowed list is returned
+        where: dictionary with key, value, taken from the query string token=value. Empty if nothing is provided
+        limit: limit dictated by user with the query string 'limit'. 100 by default
+    abort if not permitted, using bottle.abort
+    '''
+    where={}
+    limit=100
+    select=[]
+    #if type(qs) is not bottle.FormsDict:
+    #    bottle.abort(httperrors.Internal_Server_Error, '!!!!!!!!!!!!!!invalid query string not a dictionary')
+    #    #bottle.abort(httperrors.Internal_Server_Error, "call programmer")
+    for k in qs:
+        if k=='field':
+            # 'field' tokens select which columns to retrieve
+            select += qs.getall(k)
+            for v in select:
+                if v not in allowed:
+                    bottle.abort(httperrors.Bad_Request, "Invalid query string at 'field="+v+"'")
+        elif k=='limit':
+            try:
+                limit=int(qs[k])
+            except:
+                bottle.abort(httperrors.Bad_Request, "Invalid query string at 'limit="+qs[k]+"'")
+        else:
+            # any other token becomes a WHERE filter; "null" means IS NULL
+            if k not in allowed:
+                bottle.abort(httperrors.Bad_Request, "Invalid query string at '"+k+"="+qs[k]+"'")
+            if qs[k]!="null":  where[k]=qs[k]
+            else: where[k]=None
+    if len(select)==0: select += allowed
+    #change from http api to database naming
+    for i in range(0,len(select)):
+        k=select[i]
+        if http2db and k in http2db:
+            select[i] = http2db[k]
+    if http2db:
+        change_keys_http2db(where, http2db)
+    #print "filter_query_string", select,where,limit
+
+    return select,where,limit
diff --git a/RO/osm_ro/http_tools/tests/__init__.py b/RO/osm_ro/http_tools/tests/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/RO/osm_ro/http_tools/tests/test_errors.py b/RO/osm_ro/http_tools/tests/test_errors.py
new file mode 100644 (file)
index 0000000..a968e76
--- /dev/null
@@ -0,0 +1,76 @@
+# -*- coding: utf-8 -*-
+import unittest
+
+import bottle
+
+from .. import errors as httperrors
+from ...tests.helpers import TestCaseWithLogging
+
+
+class TestHttpErrors(TestCaseWithLogging):
+    """Unit tests for HttpMappedError and the ErrorHandler wrapper."""
+
+    def test_http_error_base(self):
+        # When an error code is passed as argument
+        ex = httperrors.HttpMappedError(http_code=1226324)
+        # then it should be set in the exception object
+        self.assertEqual(ex.http_code, 1226324)
+        # When an error code is not passed as argument
+        ex = httperrors.HttpMappedError()
+        # then the default error code (internal server error) should be used
+        self.assertEqual(ex.http_code, httperrors.Internal_Server_Error)
+
+    def test_error_handler_should_log_unexpected_errors(self):
+        # Given a error handler wraps a function
+        error_handler = httperrors.ErrorHandler(self.logger)
+
+        # and the function raises an unexpected error
+        @error_handler
+        def _throw():
+            raise AttributeError('some error')
+
+        # when the function is called
+        with self.assertRaises(bottle.HTTPError):
+            _throw()
+        logs = self.caplog.getvalue()
+        # then the exception should be contained by bottle
+        # and a proper message should be logged
+        assert "Unexpected exception:" in logs
+
+    def test_error_handler_should_log_http_based_errors(self):
+        # Given a error handler wraps a function
+        error_handler = httperrors.ErrorHandler(self.logger)
+
+        # and the function raises an error that is considered by the
+        # application
+        @error_handler
+        def _throw():
+            raise httperrors.HttpMappedError(http_code=404)
+
+        # when the function is called
+        with self.assertRaises(bottle.HTTPError):
+            _throw()
+        logs = self.caplog.getvalue()
+        # then the exception should be contained by bottle
+        # and a proper message should be logged
+        assert "_throw error 404" in logs
+
+    def test_error_handler_should_ignore_bottle_errors(self):
+        # Given a error handler wraps a function
+        error_handler = httperrors.ErrorHandler(self.logger)
+
+        # and the function raises an error that is considered by the
+        # application
+        exception = bottle.HTTPError()
+
+        @error_handler
+        def _throw():
+            raise exception
+
+        # when the function is called
+        with self.assertRaises(bottle.HTTPError) as context:
+            _throw()
+        # then the exception should bypass the error handler
+        self.assertEqual(context.exception, exception)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/RO/osm_ro/http_tools/tests/test_handler.py b/RO/osm_ro/http_tools/tests/test_handler.py
new file mode 100644 (file)
index 0000000..af32545
--- /dev/null
@@ -0,0 +1,95 @@
+# -*- coding: utf-8 -*-
+import unittest
+
+from mock import MagicMock, patch
+from webtest import TestApp
+
+from .. import handler
+from ..handler import BaseHandler, route
+
+
+class TestIntegration(unittest.TestCase):
+    """Integration tests for BaseHandler/route: WSGI app creation, caching,
+    url_base prefixing and parameter handling."""
+
+    def test_wsgi_app(self):
+        # Given a Handler class that implements a route
+        some_plugin = MagicMock()
+
+        class MyHandler(BaseHandler):
+            url_base = '/42'
+            plugins = [some_plugin]
+
+            @route('get', '/some/path')
+            def callback(self):
+                return 'some content'
+
+        route_mock = MagicMock()
+        with patch(handler.__name__+'.Bottle.route', route_mock):
+            # When we try to access wsgi_app for the first time
+            my_handler = MyHandler()
+            assert my_handler.wsgi_app
+            # then bottle.route should be called with the right arguments
+            route_mock.assert_called_once_with('/42/some/path', method='GET',
+                                               callback=my_handler.callback,
+                                               apply=[some_plugin])
+
+            # When we try to access wsgi_app for the second time
+            assert my_handler.wsgi_app
+            # then the result should be cached
+            # and bottle.route should not be called again
+            self.assertEqual(route_mock.call_count, 1)
+
+    def test_route_created(self):
+        # Given a Handler class, as in the example documentation
+        class MyHandler(BaseHandler):
+            def __init__(self):
+                self.value = 42
+
+            @route('GET', '/some/path/<param>')
+            def callback(self, param):
+                return '{} + {}'.format(self.value, param)
+
+        # when this class is used to generate a webapp
+        app = TestApp(MyHandler().wsgi_app)
+
+        # then the defined URLs should be available
+        response = app.get('/some/path/0')
+        self.assertEqual(response.status_code, 200)
+        # and the callbacks should have access to ``self``
+        response.mustcontain('42 + 0')
+
+    def test_url_base(self):
+        # Given a Handler class that allows url_base customization
+        class MyHandler(BaseHandler):
+            def __init__(self, url_base):
+                self.url_base = url_base
+
+            @route('GET', '/some/path/<param>')
+            def callback(self, param):
+                return param
+
+        # when this class is used to generate a webapp
+        app = TestApp(MyHandler('/prefix').wsgi_app)
+
+        # then the prefixed URLs should be available
+        response = app.get('/prefix/some/path/content')
+        self.assertEqual(response.status_code, 200)
+        response.mustcontain('content')
+
+    def test_starting_param(self):
+        # Given a Handler class with a route beginning with a param
+        class MyHandler(BaseHandler):
+            @route('GET', '/<param>/some/path')
+            def callback(self, param):
+                return '**{}**'.format(param)
+
+        # is used to generate a webapp
+        app = TestApp(MyHandler().wsgi_app)
+
+        # when the defined URLs is accessed
+        response = app.get('/42/some/path')
+        # Then no error should happen
+        self.assertEqual(response.status_code, 200)
+        response.mustcontain('**42**')
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/RO/osm_ro/http_tools/tox.ini b/RO/osm_ro/http_tools/tox.ini
new file mode 100644 (file)
index 0000000..43055c2
--- /dev/null
@@ -0,0 +1,49 @@
+# This tox file allows the devs to run unit tests only for this subpackage.
+# In order to do so, cd into the directory and run `tox`
+
+[tox]
+minversion = 1.8
+envlist = py27,py36,flake8,radon
+skipsdist = True
+
+[testenv]
+changedir = {toxinidir}
+commands =
+    nosetests -d --with-coverage --cover-package=. {posargs:tests}
+deps =
+    WebTest
+    bottle
+    coverage
+    mock
+    nose
+    six
+    PyYaml
+
+[testenv:flake8]
+changedir = {toxinidir}
+deps = flake8
+commands = flake8 {posargs:.}
+
+[testenv:radon]
+changedir = {toxinidir}
+deps = radon
+commands =
+    radon cc --show-complexity --total-average {posargs:.}
+    radon mi -s {posargs:.}
+
+[coverage:run]
+branch = True
+source = {toxinidir}
+omit =
+    tests
+    tests/*
+    */test_*
+    .tox/*
+
+[coverage:report]
+show_missing = True
+
+[flake8]
+exclude =
+    request_processing.py
+    .tox
diff --git a/RO/osm_ro/httpserver.py b/RO/osm_ro/httpserver.py
new file mode 100644 (file)
index 0000000..d86271c
--- /dev/null
@@ -0,0 +1,1533 @@
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+'''
+HTTP server implementing the openmano API. It will answer to POST, PUT, GET methods in the appropriate URLs
+and will use the nfvo.py module to run the appropriate method.
+Every YAML/JSON file is checked against a schema in openmano_schemas.py module.
+'''
+__author__="Alfonso Tierno, Gerardo Garcia"
+__date__ ="$17-sep-2014 09:07:15$"
+
+import bottle
+import yaml
+import threading
+import logging
+
+from osm_ro.openmano_schemas import vnfd_schema_v01, vnfd_schema_v02, \
+                            nsd_schema_v01, nsd_schema_v02, nsd_schema_v03, scenario_edit_schema, \
+                            scenario_action_schema, instance_scenario_action_schema, instance_scenario_create_schema_v01, \
+                            tenant_schema, tenant_edit_schema,\
+                            datacenter_schema, datacenter_edit_schema, datacenter_action_schema, datacenter_associate_schema,\
+                            object_schema, netmap_new_schema, netmap_edit_schema, sdn_controller_schema, sdn_controller_edit_schema, \
+                            sdn_port_mapping_schema, sdn_external_port_schema
+
+from .http_tools import errors as httperrors
+from .http_tools.request_processing import (
+    format_out,
+    format_in,
+    filter_query_string
+)
+from .wim.http_handler import WimHandler
+
+from . import nfvo
+from . import utils
+from .db_base import db_base_Exception
+from functools import wraps
+
+global mydb
+global url_base
+global logger
+url_base="/openmano"
+logger = None
+
+
def log_to_logger(fn):
    """Bottle plugin: emit one log line per handled request.

    Wraps a route callback so that, after it has produced its response, the
    module logger records the client address, HTTP method, URL and response
    status.  (Could be extended to take the desired logger as a parameter.)
    """
    @wraps(fn)
    def _log_to_logger(*args, **kwargs):
        outcome = fn(*args, **kwargs)
        # adjust the fields below to log exactly what is needed
        logger.info('FROM %s %s %s %s', bottle.request.remote_addr,
                    bottle.request.method,
                    bottle.request.url,
                    bottle.response.status)
        return outcome
    return _log_to_logger
+
class httpserver(threading.Thread):
    """HTTP server thread exposing the openmano API through bottle.

    One instance serves the regular API (thread name "http"); a second one
    can be created with admin=True ("http_admin").  The database handle is
    published through the module-level ``mydb`` global so the bottle route
    callbacks (plain module functions) can reach it.
    """

    def __init__(self, db, admin=False, host='localhost', port=9090,
                 wim_persistence=None, wim_engine=None):
        global mydb
        global logger
        # create the module logger lazily, on first instantiation
        if not logger:
            logger = logging.getLogger('openmano.http')
        threading.Thread.__init__(self)
        self.host = host
        self.port = port  # port where the listen service must be started
        if admin:  # was `admin==True`; truthiness test is the Python idiom
            self.name = "http_admin"
        else:
            self.name = "http"
            # only the non-admin server publishes the db handle for the routes
            mydb = db

        self.handlers = [
            WimHandler(db, wim_persistence, wim_engine, url_base)
        ]

        # daemon thread: exits automatically when the main program exits.
        # Assigning the attribute suffices; the former duplicate
        # self.setDaemon(True) call is deprecated and redundant.
        self.daemon = True

    def run(self, debug=False, quiet=True):
        """Install the request-logging plugin, merge the handler apps and serve."""
        bottle.install(log_to_logger)
        default_app = bottle.app()

        for handler in self.handlers:
            default_app.merge(handler.wsgi_app)

        bottle.run(host=self.host, port=self.port, debug=debug, quiet=quiet)
+
+
def run_bottle(db, host_='localhost', port_=9090):
    '''Used for launching in main thread, so that it can be debugged'''
    # run synchronously (not as a thread) with bottle debug enabled
    httpserver(db, host=host_, port=port_).run(debug=True)  # quiet=True
+
+
@bottle.route(url_base + '/', method='GET')
def http_get():
    """Root endpoint: placeholder liveness reply."""
    return 'works'  # TODO: to be completed
+
@bottle.hook('after_request')
def enable_cors():
    """Add a permissive CORS header to every response.

    Don't know yet if really needed. Keep it just in case.
    """
    bottle.response.headers['Access-Control-Allow-Origin'] = '*'
+
@bottle.route(url_base + '/version', method='GET')
def http_get_version():
    """Return the openmano version string reported by the nfvo module."""
    return nfvo.get_version()
+#
+# VNFs
+#
+
@bottle.route(url_base + '/tenants', method='GET')
def http_get_tenants():
    """List NFVO tenants, honouring SELECT/WHERE/LIMIT query-string filters."""
    request = bottle.request
    logger.debug('FROM %s %s %s', request.remote_addr, request.method, request.url)
    select_, where_, limit_ = filter_query_string(
        request.query, None, ('uuid', 'name', 'description', 'created_at'))
    try:
        tenants = mydb.get_rows(FROM='nfvo_tenants', SELECT=select_, WHERE=where_, LIMIT=limit_)
        utils.convert_float_timestamp2str(tenants)
        return format_out({'tenants': tenants})
    except bottle.HTTPError:
        raise
    except db_base_Exception as exc:
        logger.error("http_get_tenants error {}: {}".format(exc.http_code, str(exc)))
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(exc).__name__ + ": " + str(exc))
+
+
@bottle.route(url_base + '/tenants/<tenant_id>', method='GET')
def http_get_tenant_id(tenant_id):
    '''get tenant details, can use both uuid or name'''
    request = bottle.request
    logger.debug('FROM %s %s %s', request.remote_addr, request.method, request.url)
    try:
        select_, where_, limit_ = filter_query_string(
            request.query, None, ('uuid', 'name', 'description', 'created_at'))
        # the path segment may be either the uuid or the tenant name
        what = 'uuid' if utils.check_valid_uuid(tenant_id) else 'name'
        where_[what] = tenant_id
        tenants = mydb.get_rows(FROM='nfvo_tenants', SELECT=select_, WHERE=where_)
        if not tenants:
            bottle.abort(httperrors.Not_Found, "No tenant found with {}='{}'".format(what, tenant_id))
        elif len(tenants) > 1:
            bottle.abort(httperrors.Bad_Request, "More than one tenant found with {}='{}'".format(what, tenant_id))
        utils.convert_float_timestamp2str(tenants[0])
        return format_out({'tenant': tenants[0]})
    except bottle.HTTPError:
        raise
    except db_base_Exception as exc:
        logger.error("http_get_tenant_id error {}: {}".format(exc.http_code, str(exc)))
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(exc).__name__ + ": " + str(exc))
+
+
@bottle.route(url_base + '/tenants', method='POST')
def http_post_tenants():
    '''insert a tenant into the catalogue. '''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # validate body against the schema; drop any extra keys it may carry
    http_content, _ = format_in(tenant_schema)
    removed = utils.remove_extra_items(http_content, tenant_schema)
    if removed:
        logger.debug("Remove received extra items %s", str(removed))
    try:
        tenant_uuid = nfvo.new_tenant(mydb, http_content['tenant'])
        # answer with the freshly created tenant's details
        return http_get_tenant_id(tenant_uuid)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as exc:
        logger.error("http_post_tenants error {}: {}".format(exc.http_code, str(exc)))
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(exc).__name__ + ": " + str(exc))
+
+
@bottle.route(url_base + '/tenants/<tenant_id>', method='PUT')
def http_edit_tenant_id(tenant_id):
    '''edit tenant details, can use both uuid or name'''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    http_content, _ = format_in(tenant_edit_schema)
    removed = utils.remove_extra_items(http_content, tenant_edit_schema)
    if removed:
        logger.debug("Remove received extra items %s", str(removed))

    try:
        # resolve name/uuid, ensuring exactly one tenant matches
        tenant = mydb.get_table_by_uuid_name('nfvo_tenants', tenant_id)
        tenant_uuid = tenant['uuid']
        mydb.update_rows('nfvo_tenants', http_content['tenant'], {'uuid': tenant_uuid})
        return http_get_tenant_id(tenant_uuid)
    except bottle.HTTPError:
        raise
    except db_base_Exception as exc:
        logger.error("http_edit_tenant_id error {}: {}".format(exc.http_code, str(exc)))
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(exc).__name__ + ": " + str(exc))
+
+
@bottle.route(url_base + '/tenants/<tenant_id>', method='DELETE')
def http_delete_tenant_id(tenant_id):
    '''delete a tenant from database, can use both uuid or name'''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        deleted_name = nfvo.delete_tenant(mydb, tenant_id)
        return format_out({"result": "tenant " + deleted_name + " deleted"})
    except bottle.HTTPError:
        raise
    except db_base_Exception as exc:
        logger.error("http_delete_tenant_id error {}: {}".format(exc.http_code, str(exc)))
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(exc).__name__ + ": " + str(exc))
+
+
@bottle.route(url_base + '/<tenant_id>/datacenters', method='GET')
def http_get_datacenters(tenant_id):
    """List datacenters; for a concrete tenant, only those attached to it."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        any_tenant = (tenant_id == 'any')
        if not any_tenant:
            # make sure the tenant exists before filtering by it
            nfvo.check_tenant(mydb, tenant_id)
        select_, where_, limit_ = filter_query_string(
            bottle.request.query, None, ('uuid', 'name', 'vim_url', 'type', 'created_at'))
        if any_tenant:
            datacenters = mydb.get_rows(FROM='datacenters',
                                        SELECT=select_, WHERE=where_, LIMIT=limit_)
        else:
            where_['nfvo_tenant_id'] = tenant_id
            # 'created_at' is ambiguous in the join below: qualify it with the
            # datacenters alias in both SELECT and WHERE
            if 'created_at' in select_:
                select_[select_.index('created_at')] = 'd.created_at as created_at'
            if 'created_at' in where_:
                where_['d.created_at'] = where_.pop('created_at')
            datacenters = mydb.get_rows(
                FROM='datacenters as d join tenants_datacenters as td on d.uuid=td.datacenter_id',
                SELECT=select_, WHERE=where_, LIMIT=limit_)
        utils.convert_float_timestamp2str(datacenters)
        return format_out({'datacenters': datacenters})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as exc:
        logger.error("http_get_datacenters error {}: {}".format(exc.http_code, str(exc)))
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(exc).__name__ + ": " + str(exc))
+
+
@bottle.route(url_base + '/<tenant_id>/vim_accounts', method='GET')
@bottle.route(url_base + '/<tenant_id>/vim_accounts/<vim_account_id>', method='GET')
def http_get_vim_account(tenant_id, vim_account_id=None):
    '''get vim_account list/details, '''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        select_ = ('uuid', 'name', 'dt.datacenter_id as vim_id', 'vim_tenant_name', 'vim_tenant_id', 'user', 'config',
                   'dt.created_at as created_at', 'passwd')
        where_ = {'nfvo_tenant_id': tenant_id}
        if vim_account_id:
            where_['dt.uuid'] = vim_account_id
        from_ = 'tenants_datacenters as td join datacenter_tenants as dt on dt.uuid=td.datacenter_tenant_id'
        vim_accounts = mydb.get_rows(SELECT=select_, FROM=from_, WHERE=where_)

        if len(vim_accounts) == 0 and vim_account_id:
            # BUGFIX: was the undefined name HTTP_Not_Found (NameError at
            # runtime); use the httperrors constants like the rest of the module
            bottle.abort(httperrors.Not_Found, "No vim_account found for tenant {} and id '{}'".format(tenant_id,
                                                                                                       vim_account_id))
        for vim_account in vim_accounts:
            # never return credentials in clear text
            if vim_account["passwd"]:
                vim_account["passwd"] = "******"
            if vim_account['config'] is not None:
                try:
                    config_dict = yaml.load(vim_account['config'], Loader=yaml.Loader)
                    vim_account['config'] = config_dict
                    if vim_account['config'].get('admin_password'):
                        vim_account['config']['admin_password'] = "******"
                    if vim_account['config'].get('vcenter_password'):
                        vim_account['config']['vcenter_password'] = "******"
                    if vim_account['config'].get('nsx_password'):
                        vim_account['config']['nsx_password'] = "******"
                except Exception as e:
                    logger.error("Exception '%s' while trying to load config information", str(e))
        if vim_account_id:
            return format_out({"datacenter": vim_accounts[0]})
        else:
            return format_out({"datacenters": vim_accounts})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        # BUGFIX: log under this function's name (was "http_get_datacenter_id")
        logger.error("http_get_vim_account error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        # BUGFIX: was the undefined name HTTP_Internal_Server_Error
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>', method='GET')
def http_get_datacenter_id(tenant_id, datacenter_id):
    '''get datacenter details, can use both uuid or name'''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)

    def _mask_config(owner):
        """Parse owner['config'] YAML in place and mask sensitive fields.

        Factors out the password-masking logic that was triplicated in the
        original implementation (vim_tenants and the datacenter itself).
        """
        if owner['config'] is None:
            return
        try:
            owner['config'] = yaml.load(owner['config'], Loader=yaml.Loader)
            for secret in ('admin_password', 'vcenter_password', 'nsx_password'):
                if owner['config'].get(secret):
                    owner['config'][secret] = "******"
        except Exception as e:
            logger.error("Exception '%s' while trying to load config information", str(e))

    try:
        if tenant_id != 'any':
            # check valid tenant_id
            nfvo.check_tenant(mydb, tenant_id)
        # the path segment may be either the uuid or the datacenter name
        what = 'uuid' if utils.check_valid_uuid(datacenter_id) else 'name'
        where_ = {what: datacenter_id}
        select_ = ['uuid', 'name', 'vim_url', 'vim_url_admin', 'type', 'd.config as config', 'description',
                   'd.created_at as created_at']
        if tenant_id != 'any':
            select_.append("datacenter_tenant_id")
            where_['td.nfvo_tenant_id'] = tenant_id
            from_ = 'datacenters as d join tenants_datacenters as td on d.uuid=td.datacenter_id'
        else:
            from_ = 'datacenters as d'
        datacenters = mydb.get_rows(SELECT=select_, FROM=from_, WHERE=where_)

        if len(datacenters) == 0:
            bottle.abort(httperrors.Not_Found, "No datacenter found for tenant with {} '{}'".format(what, datacenter_id))
        elif len(datacenters) > 1:
            bottle.abort(httperrors.Bad_Request, "More than one datacenter found for tenant with {} '{}'".format(what, datacenter_id))
        datacenter = datacenters[0]
        if tenant_id != 'any':
            # attach this tenant's VIM credentials, masking the passwords
            vim_tenants = mydb.get_rows(
                SELECT=("vim_tenant_name", "vim_tenant_id", "user", "passwd", "config"),
                FROM="datacenter_tenants",
                WHERE={"uuid": datacenters[0]["datacenter_tenant_id"]},
                ORDER_BY=("created", ))
            del datacenter["datacenter_tenant_id"]
            datacenter["vim_tenants"] = vim_tenants
            for vim_tenant in vim_tenants:
                if vim_tenant["passwd"]:
                    vim_tenant["passwd"] = "******"
                _mask_config(vim_tenant)

        _mask_config(datacenter)
        utils.convert_float_timestamp2str(datacenter)
        return format_out({'datacenter': datacenter})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_get_datacenter_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/datacenters', method='POST')
def http_post_datacenters():
    '''insert a datacenter into the catalogue. '''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # parse and validate the body; passwords must not be echoed in logs
    http_content, _ = format_in(datacenter_schema, confidential_data=True)
    removed = utils.remove_extra_items(http_content, datacenter_schema)
    if removed:
        logger.debug("Remove received extra items %s", str(removed))
    try:
        datacenter_uuid = nfvo.new_datacenter(mydb, http_content['datacenter'])
        return http_get_datacenter_id('any', datacenter_uuid)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as exc:
        logger.error("http_post_datacenters error {}: {}".format(exc.http_code, str(exc)))
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(exc).__name__ + ": " + str(exc))
+
+
@bottle.route(url_base + '/datacenters/<datacenter_id_name>', method='PUT')
def http_edit_datacenter_id(datacenter_id_name):
    '''edit datacenter details, can use both uuid or name'''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    http_content, _ = format_in(datacenter_edit_schema)
    removed = utils.remove_extra_items(http_content, datacenter_edit_schema)
    if removed:
        logger.debug("Remove received extra items %s", str(removed))

    try:
        datacenter_id = nfvo.edit_datacenter(mydb, datacenter_id_name, http_content['datacenter'])
        return http_get_datacenter_id('any', datacenter_id)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as exc:
        logger.error("http_edit_datacenter_id error {}: {}".format(exc.http_code, str(exc)))
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(exc).__name__ + ": " + str(exc))
+
@bottle.route(url_base + '/<tenant_id>/sdn_controllers', method='POST')
def http_post_sdn_controller(tenant_id):
    '''insert a sdn controller into the catalogue. '''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    http_content, _ = format_in(sdn_controller_schema)
    try:
        logger.debug("tenant_id: "+tenant_id)
        controller_uuid = nfvo.sdn_controller_create(mydb, tenant_id, http_content['sdn_controller'])
        # echo back the stored controller record
        return format_out({"sdn_controller": nfvo.sdn_controller_list(mydb, tenant_id, controller_uuid)})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as exc:
        logger.error("http_post_sdn_controller error {}: {}".format(exc.http_code, str(exc)))
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(exc).__name__ + ": " + str(exc))
+
@bottle.route(url_base + '/<tenant_id>/sdn_controllers/<controller_id>', method='PUT')
def http_put_sdn_controller_update(tenant_id, controller_id):
    '''Update sdn controller'''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    http_content, _ = format_in(sdn_controller_edit_schema)
    try:
        logger.debug("content: {}".format(http_content['sdn_controller']))

        # the update call is for its side effect; answer with the fresh record
        nfvo.sdn_controller_update(mydb, tenant_id, controller_id, http_content['sdn_controller'])
        return format_out({"sdn_controller": nfvo.sdn_controller_list(mydb, tenant_id, controller_id)})

    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        # BUGFIX: log under this function's name (copy-paste said "http_post_sdn_controller")
        logger.error("http_put_sdn_controller_update error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
@bottle.route(url_base + '/<tenant_id>/sdn_controllers', method='GET')
def http_get_sdn_controller(tenant_id):
    '''get sdn controllers list, can use both uuid or name'''
    try:
        logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
        controllers = nfvo.sdn_controller_list(mydb, tenant_id)
        return format_out({'sdn_controllers': controllers})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as exc:
        logger.error("http_get_sdn_controller error {}: {}".format(exc.http_code, str(exc)))
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(exc).__name__ + ": " + str(exc))
+
@bottle.route(url_base + '/<tenant_id>/sdn_controllers/<controller_id>', method='GET')
def http_get_sdn_controller_id(tenant_id, controller_id):
    '''get sdn controller details, can use both uuid or name'''
    try:
        logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
        controller = nfvo.sdn_controller_list(mydb, tenant_id, controller_id)
        return format_out({"sdn_controllers": controller})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as exc:
        logger.error("http_get_sdn_controller_id error {}: {}".format(exc.http_code, str(exc)))
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(exc).__name__ + ": " + str(exc))
+
@bottle.route(url_base + '/<tenant_id>/sdn_controllers/<controller_id>', method='DELETE')
def http_delete_sdn_controller_id(tenant_id, controller_id):
    '''delete sdn controller, can use both uuid or name'''
    try:
        logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
        result = nfvo.sdn_controller_delete(mydb, tenant_id, controller_id)
        return format_out(result)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as exc:
        logger.error("http_delete_sdn_controller_id error {}: {}".format(exc.http_code, str(exc)))
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(exc).__name__ + ": " + str(exc))
+
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/sdn_mapping', method='POST')
def http_post_datacenter_sdn_port_mapping(tenant_id, datacenter_id):
    '''Set the sdn port mapping for a datacenter. '''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    http_content, _ = format_in(sdn_port_mapping_schema)
    try:
        mapping = nfvo.datacenter_sdn_port_mapping_set(
            mydb, tenant_id, datacenter_id, http_content['sdn_port_mapping'])
        return format_out({"sdn_port_mapping": mapping})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as exc:
        logger.error("http_post_datacenter_sdn_port_mapping error {}: {}".format(exc.http_code, str(exc)))
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(exc).__name__ + ": " + str(exc))
+
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/sdn_mapping', method='GET')
def http_get_datacenter_sdn_port_mapping(tenant_id, datacenter_id):
    '''get datacenter sdn mapping details, can use both uuid or name'''
    try:
        logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
        mapping = nfvo.datacenter_sdn_port_mapping_list(mydb, tenant_id, datacenter_id)
        return format_out({"sdn_port_mapping": mapping})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as exc:
        logger.error("http_get_datacenter_sdn_port_mapping error {}: {}".format(exc.http_code, str(exc)))
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(exc).__name__ + ": " + str(exc))
+
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/sdn_mapping', method='DELETE')
def http_delete_datacenter_sdn_port_mapping(tenant_id, datacenter_id):
    '''clean datacenter sdn mapping, can use both uuid or name'''
    try:
        logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
        result = nfvo.datacenter_sdn_port_mapping_delete(mydb, tenant_id, datacenter_id)
        return format_out({"result": result})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as exc:
        logger.error("http_delete_datacenter_sdn_port_mapping error {}: {}".format(exc.http_code, str(exc)))
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(exc).__name__ + ": " + str(exc))
+
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/networks', method='GET')  # deprecated
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/netmaps', method='GET')
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/netmaps/<netmap_id>', method='GET')
def http_getnetmap_datacenter_id(tenant_id, datacenter_id, netmap_id=None):
    '''get datacenter networks, can use both uuid or name'''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        datacenter_dict = mydb.get_table_by_uuid_name('datacenters', datacenter_id, "datacenter")
        where_ = {"datacenter_id": datacenter_dict['uuid']}
        if netmap_id:
            if utils.check_valid_uuid(netmap_id):
                where_["uuid"] = netmap_id
            else:
                where_["name"] = netmap_id
        netmaps = mydb.get_rows(FROM='datacenter_nets',
                                SELECT=('name', 'vim_net_id as vim_id', 'uuid', 'type', 'multipoint', 'shared',
                                        'description', 'created_at'),
                                WHERE=where_)
        utils.convert_float_timestamp2str(netmaps)
        utils.convert_str2boolean(netmaps, ('shared', 'multipoint'))
        if netmap_id and len(netmaps) == 1:
            data = {'netmap': netmaps[0]}
        elif netmap_id and len(netmaps) == 0:
            # bottle.abort raises HTTPError, so the dead `return` that
            # followed it in the original has been removed
            bottle.abort(httperrors.Not_Found, "No netmap found with " + " and ".join(
                map(lambda x: str(x[0]) + ": " + str(x[1]), where_.items())))
        else:
            data = {'netmaps': netmaps}
        return format_out(data)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        # BUGFIX: log under this function's name (was "http_getnetwork_datacenter_id")
        logger.error("http_getnetmap_datacenter_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/netmaps', method='DELETE')
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/netmaps/<netmap_id>', method='DELETE')
def http_delnetmap_datacenter_id(tenant_id, datacenter_id, netmap_id=None):
    '''get datacenter networks, can use both uuid or name'''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        datacenter_dict = mydb.get_table_by_uuid_name('datacenters', datacenter_id, "datacenter")
        where_ = {"datacenter_id": datacenter_dict['uuid']}
        if netmap_id:
            # the netmap may be addressed by uuid or by name
            key = "uuid" if utils.check_valid_uuid(netmap_id) else "name"
            where_[key] = netmap_id
        deleted = mydb.delete_row(FROM='datacenter_nets', WHERE=where_)
        if deleted == 0 and netmap_id:
            bottle.abort(httperrors.Not_Found, "No netmap found with " + " and ".join(
                "{}: {}".format(k, v) for k, v in where_.items()))
        if netmap_id:
            return format_out({"result": "netmap {} deleted".format(netmap_id)})
        return format_out({"result": "{} netmap deleted".format(deleted)})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as exc:
        logger.error("http_delnetmap_datacenter_id error {}: {}".format(exc.http_code, str(exc)))
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(exc).__name__ + ": " + str(exc))
+
+
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/netmaps/upload', method='POST')
def http_uploadnetmap_datacenter_id(tenant_id, datacenter_id):
    """Create netmaps from the datacenter networks (no request body is parsed)."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        # passing None as content triggers the 'upload everything' behavior
        netmaps = nfvo.datacenter_new_netmap(mydb, tenant_id, datacenter_id, None)
        utils.convert_float_timestamp2str(netmaps)
        utils.convert_str2boolean(netmaps, ('shared', 'multipoint'))
        return format_out({'netmaps': netmaps})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_uploadnetmap_datacenter_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/netmaps', method='POST')
def http_postnetmap_datacenter_id(tenant_id, datacenter_id):
    """Create a new netmap for a datacenter from the request body."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # parse and sanitize the input payload
    http_content, _ = format_in(netmap_new_schema)
    extra_items = utils.remove_extra_items(http_content, netmap_new_schema)
    if extra_items:
        logger.debug("Remove received extra items %s", str(extra_items))
    try:
        # create and return the stored netmap(s)
        netmaps = nfvo.datacenter_new_netmap(mydb, tenant_id, datacenter_id, http_content)
        utils.convert_float_timestamp2str(netmaps)
        utils.convert_str2boolean(netmaps, ('shared', 'multipoint'))
        return format_out({'netmaps': netmaps})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_postnetmap_datacenter_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/netmaps/<netmap_id>', method='PUT')
def http_putnettmap_datacenter_id(tenant_id, datacenter_id, netmap_id):
    """Edit an existing netmap and reply with its updated representation."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # parse and sanitize the input payload
    http_content, _ = format_in(netmap_edit_schema)
    extra_items = utils.remove_extra_items(http_content, netmap_edit_schema)
    if extra_items:
        logger.debug("Remove received extra items %s", str(extra_items))

    try:
        nfvo.datacenter_edit_netmap(mydb, tenant_id, datacenter_id, netmap_id, http_content)
        # reuse the GET handler so the response format matches a plain GET
        return http_getnetmap_datacenter_id(tenant_id, datacenter_id, netmap_id)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_putnettmap_datacenter_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/action', method='POST')
def http_action_datacenter_id(tenant_id, datacenter_id):
    """Perform an action over a datacenter; both uuid and name are accepted.

    For a 'net-update' action the reply is the refreshed netmap list; any
    other action replies with the raw result of nfvo.datacenter_action.
    """
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    #parse input data
    http_content, _ = format_in(datacenter_action_schema)
    r = utils.remove_extra_items(http_content, datacenter_action_schema)
    if r:
        logger.debug("Remove received extra items %s", str(r))
    try:
        #obtain data, check that only one exist
        result = nfvo.datacenter_action(mydb, tenant_id, datacenter_id, http_content)
        if 'net-update' in http_content:
            # BUGFIX: http_getnetmap_datacenter_id requires (tenant_id, datacenter_id);
            # it was called with datacenter_id alone, raising TypeError on every net-update
            return http_getnetmap_datacenter_id(tenant_id, datacenter_id)
        else:
            return format_out(result)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_action_datacenter_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/datacenters/<datacenter_id>', method='DELETE')
def http_delete_datacenter_id(datacenter_id):
    """Delete a datacenter from the database; both uuid and name are accepted."""

    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        deleted_name = nfvo.delete_datacenter(mydb, datacenter_id)
        return format_out({"result": "datacenter '" + deleted_name + "' deleted"})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_delete_datacenter_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>', method='POST')
@bottle.route(url_base + '/<tenant_id>/vim_accounts', method='POST')
def http_associate_datacenters(tenant_id, datacenter_id=None):
    """Associate an existing datacenter to this tenant (creates a vim_account)."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # parse input data; credentials in the payload must not be logged
    http_content, _ = format_in(datacenter_associate_schema, confidential_data=True)
    extra_items = utils.remove_extra_items(http_content, datacenter_associate_schema)
    if extra_items:
        logger.debug("Remove received extra items %s", str(extra_items))
    try:
        vim_account_id = nfvo.create_vim_account(mydb, tenant_id, datacenter_id, **http_content['datacenter'])
        # reply with the same format a GET of the new vim_account would produce
        return http_get_vim_account(tenant_id, vim_account_id)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_associate_datacenters error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
@bottle.route(url_base + '/<tenant_id>/vim_accounts/<vim_account_id>', method='PUT')
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>', method='PUT')
def http_vim_account_edit(tenant_id, vim_account_id=None, datacenter_id=None):
    """Edit the vim_account that associates a datacenter to this tenant."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # parse input data
    http_content, _ = format_in(datacenter_associate_schema)
    extra_items = utils.remove_extra_items(http_content, datacenter_associate_schema)
    if extra_items:
        logger.debug("Remove received extra items %s", str(extra_items))
    try:
        vim_account_id = nfvo.edit_vim_account(mydb, tenant_id, vim_account_id, datacenter_id=datacenter_id,
                                               **http_content['datacenter'])
        # reply with the same format a GET of the edited vim_account would produce
        return http_get_vim_account(tenant_id, vim_account_id)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_vim_account_edit error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>', method='DELETE')
@bottle.route(url_base + '/<tenant_id>/vim_accounts/<vim_account_id>', method='DELETE')
def http_deassociate_datacenters(tenant_id, datacenter_id=None, vim_account_id=None):
    """Deassociate a datacenter from this tenant (deletes the vim_account)."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        result_msg = nfvo.delete_vim_account(mydb, tenant_id, vim_account_id, datacenter_id)
        return format_out({"result": result_msg})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_deassociate_datacenters error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
@bottle.route(url_base + '/<tenant_id>/vim/<datacenter_id>/network/<network_id>/attach', method='POST')
def http_post_vim_net_sdn_attach(tenant_id, datacenter_id, network_id):
    """Attach an external SDN port to a VIM network."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    port_data, _ = format_in(sdn_external_port_schema)
    try:
        attach_result = nfvo.vim_net_sdn_attach(mydb, tenant_id, datacenter_id, network_id, port_data)
        return format_out(attach_result)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_post_vim_net_sdn_attach error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
@bottle.route(url_base + '/<tenant_id>/vim/<datacenter_id>/network/<network_id>/detach', method='DELETE')
@bottle.route(url_base + '/<tenant_id>/vim/<datacenter_id>/network/<network_id>/detach/<port_id>', method='DELETE')
def http_delete_vim_net_sdn_detach(tenant_id, datacenter_id, network_id, port_id=None):
    """Detach one external SDN port (or all, when port_id is omitted) from a VIM network."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        detach_result = nfvo.vim_net_sdn_detach(mydb, tenant_id, datacenter_id, network_id, port_id)
        return format_out(detach_result)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_delete_vim_net_sdn_detach error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
@bottle.route(url_base + '/<tenant_id>/vim/<datacenter_id>/<item>', method='GET')
@bottle.route(url_base + '/<tenant_id>/vim/<datacenter_id>/<item>/<name>', method='GET')
def http_get_vim_items(tenant_id, datacenter_id, item, name=None):
    """List VIM items of a given kind, or get one when a name is supplied."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        items = nfvo.vim_action_get(mydb, tenant_id, datacenter_id, item, name)
        return format_out(items)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_get_vim_items error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/vim/<datacenter_id>/<item>/<name>', method='DELETE')
def http_del_vim_items(tenant_id, datacenter_id, item, name):
    """Delete a named VIM item of a given kind."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        delete_msg = nfvo.vim_action_delete(mydb, tenant_id, datacenter_id, item, name)
        return format_out({"result": delete_msg})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_del_vim_items error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/vim/<datacenter_id>/<item>', method='POST')
def http_post_vim_items(tenant_id, datacenter_id, item):
    """Create a VIM item of a given kind from the request body."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    item_content, _ = format_in(object_schema)
    try:
        created = nfvo.vim_action_create(mydb, tenant_id, datacenter_id, item, item_content)
        return format_out(created)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_post_vim_items error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/vnfs', method='GET')
def http_get_vnfs(tenant_id):
    """List the VNFs visible to a tenant (own plus public); tenant 'any' lists all."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        restricted = tenant_id != 'any'
        if restricted:
            # validate the tenant before querying
            nfvo.check_tenant(mydb, tenant_id)
        select_, where_, limit_ = filter_query_string(bottle.request.query, None,
                ('uuid', 'name', 'osm_id', 'description', 'public', "tenant_id", "created_at") )
        if restricted:
            # a tenant sees its own VNFs and the public ones
            where_["OR"] = {"tenant_id": tenant_id, "public": True}
        vnfs = mydb.get_rows(FROM='vnfs', SELECT=select_, WHERE=where_, LIMIT=limit_)
        # change_keys_http2db(content, http2db_vnf, reverse=True)
        utils.convert_str2boolean(vnfs, ('public',))
        utils.convert_float_timestamp2str(vnfs)
        return format_out({'vnfs': vnfs})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_get_vnfs error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/vnfs/<vnf_id>', method='GET')
def http_get_vnf_id(tenant_id, vnf_id):
    """Get VNF details; both uuid and name are accepted for vnf_id."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        vnf = nfvo.get_vnf_id(mydb, tenant_id, vnf_id)
        # normalize DB representations for the HTTP reply
        utils.convert_str2boolean(vnf, ('public',))
        utils.convert_float_timestamp2str(vnf)
        return format_out(vnf)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_get_vnf_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/vnfs', method='POST')
def http_post_vnfs(tenant_id):
    """Insert a VNF into the catalogue, creating flavors/images and DB entries.

    :param tenant_id: tenant that this vnf belongs to
    :return: the same representation a GET of the new VNF would produce
    """
    # parse input data; schema_version in the payload selects the descriptor schema
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    http_content, used_schema = format_in( vnfd_schema_v01, ("schema_version",), {"0.2": vnfd_schema_v02})
    extra_items = utils.remove_extra_items(http_content, used_schema)
    if extra_items:
        logger.debug("Remove received extra items %s", str(extra_items))
    try:
        # pick the creator matching the schema the payload validated against
        if used_schema == vnfd_schema_v01:
            creator = nfvo.new_vnf
        elif used_schema == vnfd_schema_v02:
            creator = nfvo.new_vnf_v02
        else:
            logger.warning('Unexpected schema_version: %s', http_content.get("schema_version"))
            bottle.abort(httperrors.Bad_Request, "Invalid schema version")
        vnf_id = creator(mydb, tenant_id, http_content)
        return http_get_vnf_id(tenant_id, vnf_id)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_post_vnfs error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/v3/<tenant_id>/vnfd', method='POST')
def http_post_vnfs_v3(tenant_id):
    """
    Insert one or several VNFs in the catalog, following OSM IM
    :param tenant_id: tenant owner of the VNF
    :return: The detailed list of inserted VNFs, following the old format
    """
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    http_content, _ = format_in(None)
    try:
        vnfd_uuid_list = nfvo.new_vnfd_v3(mydb, tenant_id, http_content)
        vnfd_list = []
        for vnfd_uuid in vnfd_uuid_list:
            vnf = nfvo.get_vnf_id(mydb, tenant_id, vnfd_uuid)
            utils.convert_str2boolean(vnf, ('public',))
            utils.convert_float_timestamp2str(vnf)
            vnfd_list.append(vnf["vnf"])
        return format_out({"vnfd": vnfd_list})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        # BUGFIX: log label previously said 'http_post_vnfs' (the v1/v2 handler)
        logger.error("http_post_vnfs_v3 error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
@bottle.route(url_base + '/<tenant_id>/vnfs/<vnf_id>', method='DELETE')
def http_delete_vnf_id(tenant_id, vnf_id):
    """Delete a VNF from the database (plus its VIM images/flavors when appropriate).

    Both uuid and name are accepted for vnf_id.
    """
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # check valid tenant_id and delete the vnf, including images
    try:
        deleted_name = nfvo.delete_vnf(mydb, tenant_id, vnf_id)
        return format_out({"result": "VNF " + deleted_name + " deleted"})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_delete_vnf_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
#@bottle.route(url_base + '/<tenant_id>/hosts/topology', method='GET')
#@bottle.route(url_base + '/<tenant_id>/physicalview/Madrid-Alcantara', method='GET')
@bottle.route(url_base + '/<tenant_id>/physicalview/<datacenter>', method='GET')
def http_get_hosts(tenant_id, datacenter):
    """Get the VIM host topology.

    When datacenter is the literal 'treeview' the tree representation is
    returned; any other value falls back to the generic host info (the
    openmano GUI sends a hardcoded datacenter value).
    """
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        if datacenter == 'treeview':
            data = nfvo.get_hosts(mydb, tenant_id)
        else:
            #openmano-gui is using a hardcoded value for the datacenter
            result, data = nfvo.get_hosts_info(mydb, tenant_id)  #, datacenter)
            # BUGFIX: the error check was outside this branch, so the treeview
            # path hit an unbound 'result' (NameError -> spurious 500)
            if result < 0:
                bottle.abort(-result, data)
        utils.convert_float_timestamp2str(data)
        return format_out(data)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_get_hosts error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<path:path>', method='OPTIONS')
def http_options_deploy(path):
    """Answer the OPTIONS preflight requests issued by the GUI web client."""
    #TODO: check correct path, and correct headers request
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    preflight_headers = (
        ('Access-Control-Allow-Methods', 'POST, GET, PUT, DELETE, OPTIONS'),
        ('Accept', 'application/yaml,application/json'),
        ('Content-Type', 'application/yaml,application/json'),
        ('Access-Control-Allow-Headers', 'content-type'),
        ('Access-Control-Allow-Origin', '*'),
    )
    for header_name, header_value in preflight_headers:
        bottle.response.set_header(header_name, header_value)
    return
+
@bottle.route(url_base + '/<tenant_id>/topology/deploy', method='POST')
def http_post_deploy(tenant_id):
    """Create a scenario from the posted topology and start it immediately."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)

    http_content, used_schema = format_in( nsd_schema_v01, ("schema_version",), {2: nsd_schema_v02})
    #r = utils.remove_extra_items(http_content, used_schema)
    #if r is not None: print "http_post_deploy: Warning: remove extra items ", r
    #print "http_post_deploy input: ",  http_content

    try:
        scenario_id = nfvo.new_scenario(mydb, tenant_id, http_content)
        # the instance reuses the scenario name both as name and description
        instance = nfvo.start_scenario(mydb, tenant_id, scenario_id, http_content['name'], http_content['name'])
        return format_out(instance)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_post_deploy error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/topology/verify', method='POST')
def http_post_verify(tenant_id):
    """Placeholder for topology verification: logs the request and returns an empty body."""
    #TODO:
#    '''post topology verify'''
#    print "http_post_verify by tenant " + tenant_id + ' datacenter ' + datacenter
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # Not implemented yet; the commented-out lines above hint at the intended behavior.
    return
+
+#
+# SCENARIOS
+#
+
@bottle.route(url_base + '/<tenant_id>/scenarios', method='POST')
def http_post_scenarios(tenant_id):
    """Add a scenario to the catalogue, creating its internal structure in the OPENMANO DB."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    http_content, used_schema = format_in( nsd_schema_v01, ("schema_version",), {2: nsd_schema_v02, "0.3": nsd_schema_v03})
    #r = utils.remove_extra_items(http_content, used_schema)
    #if r is not None: print "http_post_scenarios: Warning: remove extra items ", r
    #print "http_post_scenarios input: ",  http_content
    try:
        # dispatch on the schema the payload validated against
        if used_schema == nsd_schema_v01:
            scenario_uuid = nfvo.new_scenario(mydb, tenant_id, http_content)
        elif used_schema == nsd_schema_v02:
            scenario_uuid = nfvo.new_scenario_v02(mydb, tenant_id, http_content, "0.2")
        elif used_schema == nsd_schema_v03:
            scenario_uuid = nfvo.new_scenario_v02(mydb, tenant_id, http_content, "0.3")
        else:
            logger.warning('Unexpected schema_version: %s', http_content.get("schema_version"))
            bottle.abort(httperrors.Bad_Request, "Invalid schema version")
        # reply with the stored scenario, formatted the same way as a GET
        return http_get_scenario_id(tenant_id, scenario_uuid)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_post_scenarios error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
@bottle.route(url_base + '/v3/<tenant_id>/nsd', method='POST')
def http_post_nsds_v3(tenant_id):
    """
    Insert one or several NSDs in the catalog, following OSM IM
    :param tenant_id: tenant owner of the NSD
    :return: The detailed list of inserted NSDs, following the old format
    """
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    http_content, _ = format_in(None)
    try:
        nsd_uuid_list = nfvo.new_nsd_v3(mydb, tenant_id, http_content)
        nsd_list = []
        for nsd_uuid in nsd_uuid_list:
            stored_scenario = mydb.get_scenario(nsd_uuid, tenant_id)
            utils.convert_float_timestamp2str(stored_scenario)
            nsd_list.append(stored_scenario)
        return format_out({'nsd': nsd_list})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_post_nsds_v3 error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/scenarios/<scenario_id>/action', method='POST')
def http_post_scenario_action(tenant_id, scenario_id):
    """Take an action over a scenario.

    Exactly one of 'start', 'deploy' (alias of start), 'reserve' (start
    without booting VMs) or 'verify' (start without VMs, then delete) is
    expected in the payload; they are checked in that order.
    """
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # parse input data
    http_content, _ = format_in(scenario_action_schema)
    extra_items = utils.remove_extra_items(http_content, scenario_action_schema)
    if extra_items:
        logger.debug("Remove received extra items %s", str(extra_items))
    try:
        # check valid tenant_id
        nfvo.check_tenant(mydb, tenant_id)
        for action in ("start", "deploy", "reserve", "verify"):
            if action not in http_content:
                continue
            params = http_content[action]
            # 'reserve' and 'verify' start the scenario without booting VMs;
            # 'start'/'deploy' keep start_scenario's default behavior
            extra_kwargs = {} if action in ("start", "deploy") else {"startvms": False}
            data = nfvo.start_scenario(mydb, tenant_id, scenario_id, params['instance_name'],
                                       params.get('description', params['instance_name']),
                                       params.get('datacenter'), **extra_kwargs)
            if action == "verify":
                # verify is start-then-delete
                nfvo.delete_instance(mydb, tenant_id, data['uuid'])
                return format_out({"result": "Verify OK"})
            return format_out(data)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_post_scenario_action error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/scenarios', method='GET')
def http_get_scenarios(tenant_id):
    """List the scenarios visible to a tenant (own plus public); tenant 'any' lists all."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        restricted = tenant_id != "any"
        if restricted:
            # validate the tenant before querying
            nfvo.check_tenant(mydb, tenant_id)
        select_, where_, limit_ = filter_query_string(bottle.request.query, None,
                                  ('uuid', 'name', 'osm_id', 'description', 'tenant_id', 'created_at', 'public'))
        if restricted:
            # a tenant sees its own scenarios and the public ones
            where_["OR"] = {"tenant_id": tenant_id, "public": True}
        scenarios = mydb.get_rows(SELECT=select_, WHERE=where_, LIMIT=limit_, FROM='scenarios')
        utils.convert_float_timestamp2str(scenarios)
        utils.convert_str2boolean(scenarios, ('public',))
        return format_out({'scenarios': scenarios})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_get_scenarios error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/scenarios/<scenario_id>', method='GET')
def http_get_scenario_id(tenant_id, scenario_id):
    '''get scenario details, can use both uuid or name

    :param tenant_id: tenant the scenario belongs to, or "any" to skip the tenant check
    :param scenario_id: scenario uuid or name
    :return: formatted {"scenario": {...}} document
    '''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        #check valid tenant_id
        if tenant_id != "any":
            nfvo.check_tenant(mydb, tenant_id)
        #obtain data
        scenario = mydb.get_scenario(scenario_id, tenant_id)
        utils.convert_float_timestamp2str(scenario)
        data={'scenario' : scenario}
        return format_out(data)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        # fixed: this log line wrongly said "http_get_scenarios" (copy-paste from the list handler)
        logger.error("http_get_scenario_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/scenarios/<scenario_id>', method='DELETE')
def http_delete_scenario_id(tenant_id, scenario_id):
    """Remove a scenario from the database; scenario_id may be a uuid or a name."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        # validate the tenant unless the wildcard "any" is used
        if tenant_id != "any":
            nfvo.check_tenant(mydb, tenant_id)
        deleted_name = mydb.delete_scenario(scenario_id, tenant_id)
        return format_out({"result": "scenario " + deleted_name + " deleted"})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_delete_scenario_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/scenarios/<scenario_id>', method='PUT')
def http_put_scenario_id(tenant_id, scenario_id):
    """Edit an existing scenario and reply with its updated details."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    http_content, _ = format_in(scenario_edit_schema)
    try:
        nfvo.edit_scenario(mydb, tenant_id, scenario_id, http_content)
        # delegate to the GET handler so the response format stays identical
        return http_get_scenario_id(tenant_id, scenario_id)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_put_scenario_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
@bottle.route(url_base + '/<tenant_id>/instances', method='POST')
def http_post_instances(tenant_id):
    """Deploy a new instance-scenario for the given tenant."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # parse and sanitize the request body
    http_content, used_schema = format_in(instance_scenario_create_schema_v01)
    removed = utils.remove_extra_items(http_content, used_schema)
    if removed is not None:
        logger.warning("http_post_instances: Warning: remove extra items %s", str(removed))
    try:
        # validate the tenant unless the wildcard "any" is used
        if tenant_id != "any":
            nfvo.check_tenant(mydb, tenant_id)
        return format_out(nfvo.create_instance(mydb, tenant_id, http_content["instance"]))
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_post_instances error {}: {}".format(e.http_code, str(e)), exc_info=True)
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+#
+# INSTANCES
+#
@bottle.route(url_base + '/<tenant_id>/instances', method='GET')
def http_get_instances(tenant_id):
    '''get instance list

    :param tenant_id: tenant the instances belong to, or "any" to list every instance
    :return: formatted {"instances": [...]} document
    '''
    # Every other handler in this module traces the request; this one was missing the log line.
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        #check valid tenant_id
        if tenant_id != "any":
            nfvo.check_tenant(mydb, tenant_id)
        #obtain data
        s,w,l=filter_query_string(bottle.request.query, None, ('uuid', 'name', 'scenario_id', 'tenant_id', 'description', 'created_at'))
        if tenant_id != "any":
            w['tenant_id'] = tenant_id
        instances = mydb.get_rows(SELECT=s, WHERE=w, LIMIT=l, FROM='instance_scenarios')
        utils.convert_float_timestamp2str(instances)
        utils.convert_str2boolean(instances, ('public',) )
        data={'instances':instances}
        return format_out(data)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_get_instances error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/instances/<instance_id>', method='GET')
def http_get_instance_id(tenant_id, instance_id):
    """Return the details of one deployed instance; instance_id may be a uuid or a name."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        # "any" disables the tenant check and removes the tenant filter
        if tenant_id == "any":
            tenant_id = None
        else:
            nfvo.check_tenant(mydb, tenant_id)

        instance = nfvo.get_instance_id(mydb, tenant_id, instance_id)

        # Workaround for SO: vnfs:vms:interfaces:ip_address may hold a ";"-separated
        # list; report only the first address
        for vnf in instance.get("vnfs", ()):
            for vm in vnf.get("vms", ()):
                for iface in vm.get("interfaces", ()):
                    if iface.get("ip_address"):
                        iface["ip_address"] = iface["ip_address"].split(";")[0]
        utils.convert_float_timestamp2str(instance)
        return format_out(instance)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_get_instance_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/instances/<instance_id>', method='DELETE')
def http_delete_instance_id(tenant_id, instance_id):
    """Undeploy an instance from the VIM and remove it from the database (uuid or name)."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        # "any" disables the tenant check and removes the tenant filter
        if tenant_id == "any":
            tenant_id = None
        else:
            nfvo.check_tenant(mydb, tenant_id)
        message = nfvo.delete_instance(mydb, tenant_id, instance_id)
        return format_out({"result": message})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_delete_instance_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/instances/<instance_id>/action', method='POST')
def http_post_instance_scenario_action(tenant_id, instance_id):
    """
    Execute an action over a deployed scenario instance.

    :param tenant_id: tenant where user belongs to
    :param instance_id: instance identity (uuid or name)
    :return: formatted action result
    """
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # parse and sanitize the request body
    http_content, _ = format_in(instance_scenario_action_schema)
    extra_items = utils.remove_extra_items(http_content, instance_scenario_action_schema)
    if extra_items:
        logger.debug("Remove received extra items %s", str(extra_items))
    try:
        # validate the tenant unless the wildcard "any" is used
        if tenant_id != "any":
            nfvo.check_tenant(mydb, tenant_id)

        # resolve the instance (name or uuid) to its uuid before acting on it
        instance = mydb.get_instance_scenario(instance_id, tenant_id)
        data = nfvo.instance_action(mydb, tenant_id, instance["uuid"], http_content)
        return format_out(data)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_post_instance_scenario_action error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.route(url_base + '/<tenant_id>/instances/<instance_id>/action', method='GET')
@bottle.route(url_base + '/<tenant_id>/instances/<instance_id>/action/<action_id>', method='GET')
def http_get_instance_scenario_action(tenant_id, instance_id, action_id=None):
    """
    List the actions done over an instance, or one action's details.

    :param tenant_id: tenant where user belongs to. Can be "any" to ignore
    :param instance_id: instance id, can be "any" to get actions of all instances
    :param action_id: optional action to fetch in detail
    """
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        # validate the tenant unless the wildcard "any" is used
        if tenant_id != "any":
            nfvo.check_tenant(mydb, tenant_id)
        return format_out(nfvo.instance_action_get(mydb, tenant_id, instance_id, action_id))
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_get_instance_scenario_action error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+
@bottle.error(400)
@bottle.error(401)
@bottle.error(404)
@bottle.error(403)
@bottle.error(405)
@bottle.error(406)
@bottle.error(409)
@bottle.error(503)
@bottle.error(500)
def error400(error):
    """Render every registered HTTP error as the standard JSON/YAML error envelope."""
    body = {
        "error": {
            "code": error.status_code,
            "type": error.status,
            "description": error.body,
        }
    }
    # allow browsers on other origins to read the error response
    bottle.response.headers['Access-Control-Allow-Origin'] = '*'
    return format_out(body)
+
diff --git a/RO/osm_ro/nfvo.py b/RO/osm_ro/nfvo.py
new file mode 100644 (file)
index 0000000..a28f57f
--- /dev/null
@@ -0,0 +1,5770 @@
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+'''
+NFVO engine, implementing all the methods for the creation, deletion and management of vnfs, scenarios and instances
+'''
+__author__="Alfonso Tierno, Gerardo Garcia, Pablo Montes"
+__date__ ="$16-sep-2014 22:05:01$"
+
+# import imp
+import json
+import yaml
+from osm_ro import utils
+from osm_ro.utils import deprecated
+from osm_ro.vim_thread import vim_thread
+import osm_ro.console_proxy_thread as cli
+from osm_ro import vimconn
+import logging
+import collections
+import math
+from uuid import uuid4
+from osm_ro.db_base import db_base_Exception
+
+from osm_ro import nfvo_db
+from threading import Lock
+import time as t
+# TODO py3 BEGIN
+# from lib_osm_openvim import ovim as ovim_module
+# from lib_osm_openvim.ovim import ovimException
+from unittest.mock  import MagicMock
+ovim_module = MagicMock()
class ovimException(Exception):
   """Stand-in for lib_osm_openvim.ovim.ovimException while openvim is mocked out (see TODO py3 above)."""
   pass
+ovim_module.ovimException = ovimException
+# TODO py3 END
+
+from Crypto.PublicKey import RSA
+
+import osm_im.vnfd as vnfd_catalog
+import osm_im.nsd as nsd_catalog
+from pyangbind.lib.serialise import pybindJSONDecoder
+from copy import deepcopy
+from pkg_resources import iter_entry_points
+
+
+# WIM
+import osm_ro.wim.wimconn as wimconn
+import osm_ro.wim.wim_thread as wim_thread
+from osm_ro.http_tools import errors as httperrors
+from osm_ro.wim.engine import WimEngine
+from osm_ro.wim.persistence import WimPersistence
+from copy import deepcopy
+from pprint import pformat
+#
+
# ---- module-level shared state of the NFVO engine ----
# NOTE: `global` declarations at module scope are no-ops; kept from the original code.
global global_config
# WIM
global wim_engine
wim_engine  = None
global wimconn_imported
#
global logger
global default_volume_size
default_volume_size = '5' #default volume size in GB when none is requested
global ovim
ovim = None
global_config = None

plugins = {}   # dictionary with VIM type as key, loaded module as value
vim_threads = {"running":{}, "deleting": {}, "names": []}      # threads running for attached-VIMs
vim_persistent_info = {}   # per datacenter_tenant_id scratch dict handed to each vimconnector
# WIM
wimconn_imported = {}   # dictionary with WIM type as key, loaded module as value
wim_threads = {"running":{}, "deleting": {}, "names": []}      # threads running for attached-WIMs
wim_persistent_info = {}
#

logger = logging.getLogger('openmano.nfvo')
task_lock = Lock()      # lock shared with the vim threads (see start_service)
last_task_id = 0.0      # monotonic cursor consumed by get_task_id()
db = None               # nfvo_db connector, created in start_service()
db_lock = Lock()        # protects the shared db connector
+
+
class NfvoException(httperrors.HttpMappedError):
    """Common class for NFVO errors; constructed with (message, http_code) so callers can map it to an HTTP reply."""
+
def _load_vim_plugin(name):
    """Load the 'osm_rovim.plugins' entry point called *name* into the module plugins cache.

    :param name: plugin name, e.g. "rovim_openstack"
    :raises NfvoException: when no entry point with that name is registered
    """
    global plugins
    for entry_point in iter_entry_points('osm_rovim.plugins', name):
        plugins[name] = entry_point.load()
    if name and name not in plugins:
        raise NfvoException("Unknown vim type '{}'. This plugin has not been registered".format(name),
                            httperrors.Bad_Request)
+
def get_task_id():
    """Return a new, strictly increasing task id of the form "ACTION-<epoch>.<usec>"."""
    global last_task_id
    now = t.time()
    if now <= last_task_id:
        # the clock did not advance past the previous id; nudge by one microsecond
        now = last_task_id + 0.000001
    last_task_id = now
    return "ACTION-{:.6f}".format(now)
+
+
def new_task(name, params, depends=None):
    """Deprecated helper kept for compatibility: build a legacy task dictionary."""
    task = {"status": "enqueued", "id": get_task_id(), "name": name, "params": params}
    if depends:
        task["depends"] = depends
    return task
+
+
def is_task_id(id):
    """Return True when *id* carries the "TASK-" prefix of a task identifier.

    Note: the parameter name shadows the builtin `id` but is kept for
    backward compatibility with keyword callers.
    """
    # the original `True if ... else False` ternary around a boolean was redundant
    return id[:5] == "TASK-"
+
+
def get_non_used_vim_name(datacenter_name, datacenter_id, tenant_name, tenant_id):
    """Choose a thread name not yet present in vim_threads["names"] and register it.

    Preference order: truncated datacenter name, then "<datacenter>.<tenant>",
    finally the datacenter uuid. (tenant_id is accepted for symmetry but unused.)
    """
    candidates = [datacenter_name[:16]]
    if tenant_name:
        candidates.append(datacenter_name[:16] + "." + tenant_name[:16])
    for candidate in candidates:
        if candidate not in vim_threads["names"]:
            vim_threads["names"].append(candidate)
            return candidate
    # last resort: the datacenter uuid, registered even when already listed
    vim_threads["names"].append(datacenter_id)
    return datacenter_id
+
+# -- Move
# -- Move
def get_non_used_wim_name(wim_name, wim_id, tenant_name, tenant_id):
    """Choose a thread name not yet present in wim_threads["names"] and register it.

    Preference order: truncated wim name, then "<wim>.<tenant>", finally
    "<wim_id>-<tenant_id>".
    """
    name = wim_name[:16]
    if name not in wim_threads["names"]:
        wim_threads["names"].append(name)
        return name
    # Guard against a missing tenant_name (mirrors get_non_used_vim_name);
    # the original code raised TypeError when tenant_name was None.
    if tenant_name:
        name = wim_name[:16] + "." + tenant_name[:16]
        if name not in wim_threads["names"]:
            wim_threads["names"].append(name)
            return name
    name = wim_id + "-" + tenant_id
    wim_threads["names"].append(name)
    return name
+
+
def start_service(mydb, persistence=None, wim=None):
    """Initialize the NFVO engine: DB connection, ovim/SDN library, WIM engine and one vim_thread per attached VIM.

    :param mydb: main nfvo database connector; its lock is replaced by the shared db_lock
    :param persistence: optional WimPersistence; built over the local db connector when not given
    :param wim: optional WimEngine; built over `persistence` when not given
    :raises NfvoException: on database or ovim errors
    """
    global db, global_config, plugins
    db = nfvo_db.nfvo_db(lock=db_lock)
    mydb.lock = db_lock
    db.connect(global_config['db_host'], global_config['db_user'], global_config['db_passwd'], global_config['db_name'])
    global ovim

    persistence = persistence or  WimPersistence(db)

    # Initialize openvim for SDN control
    # TODO: Avoid static configuration by adding new parameters to openmanod.cfg
    # TODO: review ovim.py to delete not needed configuration
    ovim_configuration = {
        'logger_name': 'openmano.ovim',
        'network_vlan_range_start': 1000,
        'network_vlan_range_end': 4096,
        'db_name': global_config["db_ovim_name"],
        'db_host': global_config["db_ovim_host"],
        'db_user': global_config["db_ovim_user"],
        'db_passwd': global_config["db_ovim_passwd"],
        'bridge_ifaces': {},
        'mode': 'normal',
        'network_type': 'bridge',
        #TODO: log_level_of should not be needed. To be modified in ovim
        'log_level_of': 'DEBUG'
    }
    try:
        # starts ovim library
        ovim = ovim_module.ovim(ovim_configuration)

        global wim_engine
        wim_engine = wim or WimEngine(persistence)
        wim_engine.ovim = ovim

        ovim.start_service()

        #delete old unneeded vim_wim_actions
        clean_db(mydb)

        # starts vim_threads: one per datacenter_tenant attachment found in the DB
        from_= 'tenants_datacenters as td join datacenters as d on td.datacenter_id=d.uuid join '\
                'datacenter_tenants as dt on td.datacenter_tenant_id=dt.uuid'
        select_ = ('type', 'd.config as config', 'd.uuid as datacenter_id', 'vim_url', 'vim_url_admin',
                   'd.name as datacenter_name', 'dt.uuid as datacenter_tenant_id',
                   'dt.vim_tenant_name as vim_tenant_name', 'dt.vim_tenant_id as vim_tenant_id',
                   'user', 'passwd', 'dt.config as dt_config', 'nfvo_tenant_id')
        vims = mydb.get_rows(FROM=from_, SELECT=select_)
        for vim in vims:
            # merge datacenter-level and tenant-level yaml config into one extra dict
            extra={'datacenter_tenant_id': vim.get('datacenter_tenant_id'),
                   'datacenter_id': vim.get('datacenter_id')}
            if vim["config"]:
                extra.update(yaml.load(vim["config"], Loader=yaml.Loader))
            if vim.get('dt_config'):
                extra.update(yaml.load(vim["dt_config"], Loader=yaml.Loader))
            plugin_name = "rovim_" + vim["type"]
            if plugin_name not in plugins:
                _load_vim_plugin(plugin_name)

            thread_id = vim['datacenter_tenant_id']
            vim_persistent_info[thread_id] = {}
            try:
                #if not tenant:
                #    return -httperrors.Bad_Request, "You must provide a valid tenant name or uuid for VIM  %s" % ( vim["type"])
                # NOTE(review): myvim only validates that the connector can be built;
                # the instance itself is not used below (the vim_thread builds its own).
                myvim = plugins[plugin_name].vimconnector(
                    uuid=vim['datacenter_id'], name=vim['datacenter_name'],
                    tenant_id=vim['vim_tenant_id'], tenant_name=vim['vim_tenant_name'],
                    url=vim['vim_url'], url_admin=vim['vim_url_admin'],
                    user=vim['user'], passwd=vim['passwd'],
                    config=extra, persistent_info=vim_persistent_info[thread_id]
                )
            except vimconn.vimconnException as e:
                # keep going: a broken VIM must not prevent the service from starting
                myvim = e
                logger.error("Cannot launch thread for VIM {} '{}': {}".format(vim['datacenter_name'],
                                                                               vim['datacenter_id'], e))
            except Exception as e:
                raise NfvoException("Error at VIM  {}; {}: {}".format(vim["type"], type(e).__name__, e),
                                    httperrors.Internal_Server_Error)
            thread_name = get_non_used_vim_name(vim['datacenter_name'], vim['datacenter_id'], vim['vim_tenant_name'],
                                                vim['vim_tenant_id'])
            new_thread = vim_thread(task_lock, plugins, thread_name, vim['datacenter_name'],
                                    vim['datacenter_tenant_id'], db=db, db_lock=db_lock, ovim=ovim)
            new_thread.start()
            vim_threads["running"][thread_id] = new_thread

        wim_engine.start_threads()
    except db_base_Exception as e:
        raise NfvoException(str(e) + " at nfvo.get_vim", e.http_code)
    except ovim_module.ovimException as e:
        message = str(e)
        if message[:22] == "DATABASE wrong version":
            message = "DATABASE wrong version of lib_osm_openvim {msg} -d{dbname} -u{dbuser} -p{dbpass} {ver}' "\
                      "at host {dbhost}".format(
                            msg=message[22:-3], dbname=global_config["db_ovim_name"],
                            dbuser=global_config["db_ovim_user"], dbpass=global_config["db_ovim_passwd"],
                            ver=message[-3:-1], dbhost=global_config["db_ovim_host"])
        raise NfvoException(message, httperrors.Bad_Request)
+
+
def stop_service():
    """Stop ovim, ask every running VIM/WIM thread to exit and terminate console threads."""
    global ovim, global_config
    if ovim:
        ovim.stop_service()
    # move running vim threads to "deleting" after queueing their exit task
    for thread_id, thread in vim_threads["running"].items():
        thread.insert_task("exit")
        vim_threads["deleting"][thread_id] = thread
    vim_threads["running"] = {}

    if wim_engine:
        wim_engine.stop_threads()

    console_threads = global_config.get("console_thread") if global_config else None
    if console_threads:
        for thread in console_threads:
            thread.terminate = True
+
def get_version():
    """Return the openmanod version banner string."""
    banner = "openmanod version {} {}\n(c) Copyright Telefonica".format(
        global_config["version"], global_config["version_date"])
    return banner
+
def clean_db(mydb):
    """
    Clean unused or old entries at database to avoid unlimited growing
    :param mydb: database connector
    :return: None
    """
    # get and delete unused vim_wim_actions: all elements deleted, one week before, instance not present
    now = t.time()-3600*24*7
    instance_action_id = None
    nb_deleted = 0
    # delete in pages of 100 rows to bound memory and lock time per query
    while True:
        actions_to_delete = mydb.get_rows(
            SELECT=("item", "item_id", "instance_action_id"),
            FROM="vim_wim_actions as va join instance_actions as ia on va.instance_action_id=ia.uuid "
                    "left join instance_scenarios as i on ia.instance_id=i.uuid",
            WHERE={"va.action": "DELETE", "va.modified_at<": now, "i.uuid": None,
                   "va.status": ("DONE", "SUPERSEDED")},
            LIMIT=100
        )
        for to_delete in actions_to_delete:
            mydb.delete_row(FROM="vim_wim_actions", WHERE=to_delete)
            # delete the parent instance_action once per group of rows;
            # assumes rows arrive grouped by instance_action_id — TODO confirm ordering
            if instance_action_id != to_delete["instance_action_id"]:
                instance_action_id = to_delete["instance_action_id"]
                mydb.delete_row(FROM="instance_actions", WHERE={"uuid": instance_action_id})
        nb_deleted += len(actions_to_delete)
        # fewer than a full page means there is nothing left to delete
        if len(actions_to_delete) < 100:
            break
    # clean locks
    mydb.update_rows("vim_wim_actions", UPDATE={"worker": None}, WHERE={"worker<>": None})

    if nb_deleted:
        logger.debug("Removed {} unused vim_wim_actions".format(nb_deleted))
+
+
def get_flavorlist(mydb, vnf_id, nfvo_tenant=None):
    """Return the flavor uuids referenced by the VMs of a VNF.

    :param mydb: database connector
    :param vnf_id: VNF uuid whose VMs are inspected
    :param nfvo_tenant: optional tenant filter
    :return: list of flavor uuids (possibly empty)
    """
    where = {'vnf_id': vnf_id}
    if nfvo_tenant is not None:
        where['nfvo_tenant_id'] = nfvo_tenant
    rows = mydb.get_rows(FROM='vms join flavors on vms.flavor_id=flavors.uuid',
                         SELECT=('flavor_id',), WHERE=where)
    return [row['flavor_id'] for row in rows]
+
+
def get_imagelist(mydb, vnf_id, nfvo_tenant=None):
    """
    Get used images of all vms belonging to this VNFD, without duplicates
    :param mydb: database connector
    :param vnf_id: vnfd uuid
    :param nfvo_tenant: tenant, not used
    :return: list of image uuids used
    """
    images = []
    for vm in mydb.get_rows(SELECT=('image_id', 'image_list'), FROM='vms', WHERE={'vnf_id': vnf_id}):
        if vm["image_id"] and vm["image_id"] not in images:
            images.append(vm["image_id"])
        if vm["image_list"]:
            # image_list is stored as a yaml document of {"image_id": ...} entries
            for entry in yaml.load(vm["image_list"], Loader=yaml.Loader):
                if entry["image_id"] not in images:
                    images.append(entry["image_id"])
    return images
+
+
def get_vim(mydb, nfvo_tenant=None, datacenter_id=None, datacenter_name=None, datacenter_tenant_id=None,
            vim_tenant=None, vim_tenant_name=None, vim_user=None, vim_passwd=None, ignore_errors=False):
    '''Obtain a dictionary of VIM (datacenter) connector instances matching the given filters.

    Each keyword filter narrows the datacenters looked up in the database.
    When ignore_errors is True, plugin-load or connector-construction failures
    are logged and the offending VIM is skipped instead of raising.

    return dictionary with {datacenter_id: vim_class, ... }. vim_class contain:
            'nfvo_tenant_id','datacenter_id','vim_tenant_id','vim_url','vim_url_admin','datacenter_name','type','user','passwd'
        raise exception upon error
    '''
    global plugins
    WHERE_dict={}
    if nfvo_tenant     is not None:  WHERE_dict['nfvo_tenant_id'] = nfvo_tenant
    if datacenter_id   is not None:  WHERE_dict['d.uuid']  = datacenter_id
    if datacenter_tenant_id is not None:  WHERE_dict['datacenter_tenant_id']  = datacenter_tenant_id
    if datacenter_name is not None:  WHERE_dict['d.name']  = datacenter_name
    if vim_tenant      is not None:  WHERE_dict['dt.vim_tenant_id']  = vim_tenant
    if vim_tenant_name is not None:  WHERE_dict['vim_tenant_name']  = vim_tenant_name
    # tenant-scoped filters need the join with the tenant attachment tables
    if nfvo_tenant or vim_tenant or vim_tenant_name or datacenter_tenant_id:
        from_= 'tenants_datacenters as td join datacenters as d on td.datacenter_id=d.uuid join datacenter_tenants as dt on td.datacenter_tenant_id=dt.uuid'
        select_ = ('type','d.config as config','d.uuid as datacenter_id', 'vim_url', 'vim_url_admin', 'd.name as datacenter_name',
                   'dt.uuid as datacenter_tenant_id','dt.vim_tenant_name as vim_tenant_name','dt.vim_tenant_id as vim_tenant_id',
                   'user','passwd', 'dt.config as dt_config')
    else:
        from_ = 'datacenters as d'
        select_ = ('type','config','d.uuid as datacenter_id', 'vim_url', 'vim_url_admin', 'd.name as datacenter_name')
    try:
        vims = mydb.get_rows(FROM=from_, SELECT=select_, WHERE=WHERE_dict )
        vim_dict={}
        for vim in vims:
            # merge datacenter-level and tenant-level yaml config into one extra dict
            extra={'datacenter_tenant_id': vim.get('datacenter_tenant_id'),
                   'datacenter_id': vim.get('datacenter_id'),
                   '_vim_type_internal': vim.get('type')}
            if vim["config"]:
                extra.update(yaml.load(vim["config"], Loader=yaml.Loader))
            if vim.get('dt_config'):
                extra.update(yaml.load(vim["dt_config"], Loader=yaml.Loader))
            plugin_name = "rovim_" + vim["type"]
            if plugin_name not in plugins:
                try:
                    _load_vim_plugin(plugin_name)
                except NfvoException as e:
                    if ignore_errors:
                        logger.error("{}".format(e))
                        continue
                    else:
                        raise
            try:
                # reuse (or create) the per datacenter_tenant persistent scratch dict
                if 'datacenter_tenant_id' in vim:
                    thread_id = vim["datacenter_tenant_id"]
                    if thread_id not in vim_persistent_info:
                        vim_persistent_info[thread_id] = {}
                    persistent_info = vim_persistent_info[thread_id]
                else:
                    persistent_info = {}
                #if not tenant:
                #    return -httperrors.Bad_Request, "You must provide a valid tenant name or uuid for VIM  %s" % ( vim["type"])
                vim_dict[vim['datacenter_id']] = plugins[plugin_name].vimconnector(
                                uuid=vim['datacenter_id'], name=vim['datacenter_name'],
                                tenant_id=vim.get('vim_tenant_id',vim_tenant),
                                tenant_name=vim.get('vim_tenant_name',vim_tenant_name),
                                url=vim['vim_url'], url_admin=vim['vim_url_admin'],
                                user=vim.get('user',vim_user), passwd=vim.get('passwd',vim_passwd),
                                config=extra, persistent_info=persistent_info
                        )
            except Exception as e:
                if ignore_errors:
                    logger.error("Error at VIM  {}; {}: {}".format(vim["type"], type(e).__name__, str(e)))
                    continue
                http_code = httperrors.Internal_Server_Error
                if isinstance(e, vimconn.vimconnException):
                    http_code = e.http_code
                raise NfvoException("Error at VIM  {}; {}: {}".format(vim["type"], type(e).__name__, str(e)), http_code)
        return vim_dict
    except db_base_Exception as e:
        raise NfvoException(str(e) + " at nfvo.get_vim", e.http_code)
+
+
def rollback(mydb,  vims, rollback_list):
    """Undo previously created items, in reverse creation order.

    :param mydb: database connector
    :param vims: dict of {vim_id: vimconnector} used during the creation
    :param rollback_list: list of {"where": "vim"|"mano", "what": ..., "uuid": ..., ...} entries
    :return: (True, message) when everything was deleted, (False, message) listing leftovers otherwise
    """
    undeleted_items=[]
    # delete things by reverse order (idiomatic `reversed` instead of a manual index loop)
    for item in reversed(rollback_list):
        if item["where"]=="vim":
            if item["vim_id"] not in vims:
                continue
            if is_task_id(item["uuid"]):
                # never materialized at the VIM; nothing to delete there
                continue
            vim = vims[item["vim_id"]]
            try:
                if item["what"]=="image":
                    vim.delete_image(item["uuid"])
                    mydb.delete_row(FROM="datacenters_images", WHERE={"datacenter_vim_id": vim["id"], "vim_id":item["uuid"]})
                elif item["what"]=="flavor":
                    vim.delete_flavor(item["uuid"])
                    mydb.delete_row(FROM="datacenters_flavors", WHERE={"datacenter_vim_id": vim["id"], "vim_id":item["uuid"]})
                elif item["what"]=="network":
                    vim.delete_network(item["uuid"])
                elif item["what"]=="vm":
                    vim.delete_vminstance(item["uuid"])
            except vimconn.vimconnException as e:
                logger.error("Error in rollback. Not possible to delete VIM %s '%s'. Message: %s", item['what'], item["uuid"], str(e))
                undeleted_items.append("{} {} from VIM {}".format(item['what'], item["uuid"], vim["name"]))
            except db_base_Exception as e:
                logger.error("Error in rollback. Not possible to delete %s '%s' from DB.datacenters Message: %s", item['what'], item["uuid"], str(e))

        else: # where==mano
            try:
                if item["what"]=="image":
                    mydb.delete_row(FROM="images", WHERE={"uuid": item["uuid"]})
                elif item["what"]=="flavor":
                    mydb.delete_row(FROM="flavors", WHERE={"uuid": item["uuid"]})
            except db_base_Exception as e:
                logger.error("Error in rollback. Not possible to delete %s '%s' from DB. Message: %s", item['what'], item["uuid"], str(e))
                undeleted_items.append("{} '{}'".format(item['what'], item["uuid"]))
    if not undeleted_items:
        return True," Rollback successful."
    else:
        return False," Rollback fails to delete: " + str(undeleted_items)
+
+
def check_vnf_descriptor(vnf_descriptor, vnf_descriptor_version=1):
    """Validate the internal consistency of a VNF descriptor.

    Checks that interface names are unique inside each VNFC, that external and
    internal connections reference existing VNFCs/interfaces, that "ptp"/"e-line"
    internal connections have no more than 2 elements, and that the interface
    kind (underlay/overlay) matches the connection type. Missing internal
    connection "type" (v1) or "implementation" (v2) is filled in place from the
    interface kind.

    :param vnf_descriptor: descriptor dictionary; modified in place to add defaults
    :param vnf_descriptor_version: 1 or 2, selects the wording of errors and which default is filled
    :raises NfvoException: with httperrors.Bad_Request on any inconsistency
    :return: None
    """
    global global_config
    # map each VNFC name to {interface-name: "underlay"/"overlay"}
    vnfc_interfaces = {}
    for vnfc in vnf_descriptor["vnf"]["VNFC"]:
        name_dict = {}
        # dataplane (numa) interfaces are "underlay"
        for numa in vnfc.get("numas", ()):
            for interface in numa.get("interfaces", ()):
                if interface["name"] in name_dict:
                    raise NfvoException(
                        "Error at vnf:VNFC[name:'{}']:numas:interfaces:name, interface name '{}' already used in this VNFC".format(
                            vnfc["name"], interface["name"]),
                        httperrors.Bad_Request)
                name_dict[interface["name"]] = "underlay"
        # bridge interfaces are "overlay"
        for interface in vnfc.get("bridge-ifaces", ()):
            if interface["name"] in name_dict:
                raise NfvoException(
                    "Error at vnf:VNFC[name:'{}']:bridge-ifaces:name, interface name '{}' already used in this VNFC".format(
                        vnfc["name"], interface["name"]),
                    httperrors.Bad_Request)
            name_dict[interface["name"]] = "overlay"
        vnfc_interfaces[vnfc["name"]] = name_dict
        # check boot-data info (disabled)
        # if "boot-data" in vnfc:
        #     # check that user-data is incompatible with users and config-files
        #     if (vnfc["boot-data"].get("users") or vnfc["boot-data"].get("config-files")) and vnfc["boot-data"].get("user-data"):
        #         raise NfvoException(
        #             "Error at vnf:VNFC:boot-data, fields 'users' and 'config-files' are not compatible with 'user-data'",
        #             httperrors.Bad_Request)

    # check that the info in external_connections matches the one in the vnfcs
    name_list = []
    for external_connection in vnf_descriptor["vnf"].get("external-connections", ()):
        if external_connection["name"] in name_list:
            raise NfvoException(
                "Error at vnf:external-connections:name, value '{}' already used as an external-connection".format(
                    external_connection["name"]),
                httperrors.Bad_Request)
        name_list.append(external_connection["name"])
        if external_connection["VNFC"] not in vnfc_interfaces:
            raise NfvoException(
                "Error at vnf:external-connections[name:'{}']:VNFC, value '{}' does not match any VNFC".format(
                    external_connection["name"], external_connection["VNFC"]),
                httperrors.Bad_Request)
        if external_connection["local_iface_name"] not in vnfc_interfaces[external_connection["VNFC"]]:
            raise NfvoException(
                "Error at vnf:external-connections[name:'{}']:local_iface_name, value '{}' does not match any interface of this VNFC".format(
                    external_connection["name"],
                    external_connection["local_iface_name"]),
                httperrors.Bad_Request)

    # check that the info in internal_connections matches the one in the vnfcs
    name_list = []
    for internal_connection in vnf_descriptor["vnf"].get("internal-connections", ()):
        if internal_connection["name"] in name_list:
            raise NfvoException(
                "Error at vnf:internal-connections:name, value '{}' already used as an internal-connection".format(
                    internal_connection["name"]),
                httperrors.Bad_Request)
        name_list.append(internal_connection["name"])
        # point-to-point connections ("ptp" at v1, "e-line" at v2) admit only 2 elements
        if len(internal_connection["elements"]) > 2 and internal_connection.get("type") in ("ptp", "e-line"):
            raise NfvoException(
                "Error at 'vnf:internal-connections[name:'{}']:elements', size must be 2 for a '{}' type. Consider change it to '{}' type".format(
                    internal_connection["name"],
                    'ptp' if vnf_descriptor_version == 1 else 'e-line',
                    'data' if vnf_descriptor_version == 1 else "e-lan"),
                httperrors.Bad_Request)
        for port in internal_connection["elements"]:
            vnf = port["VNFC"]
            iface = port["local_iface_name"]
            if vnf not in vnfc_interfaces:
                raise NfvoException(
                    "Error at vnf:internal-connections[name:'{}']:elements[]:VNFC, value '{}' does not match any VNFC".format(
                        internal_connection["name"], vnf),
                    httperrors.Bad_Request)
            if iface not in vnfc_interfaces[vnf]:
                # NOTE: a dead "return -httperrors.Bad_Request," used to follow this raise; removed
                raise NfvoException(
                    "Error at vnf:internal-connections[name:'{}']:elements[]:local_iface_name, value '{}' does not match any interface of this VNFC".format(
                        internal_connection["name"], iface),
                    httperrors.Bad_Request)
            # fill defaults from the interface kind when the connection type is not declared
            if vnf_descriptor_version == 1 and "type" not in internal_connection:
                internal_connection["type"] = "bridge" if vnfc_interfaces[vnf][iface] == "overlay" else "data"
            if vnf_descriptor_version == 2 and "implementation" not in internal_connection:
                # interface kinds ("overlay"/"underlay") are exactly the implementation values
                internal_connection["implementation"] = vnfc_interfaces[vnf][iface]
            # an overlay (bridge) interface cannot join an underlay (data) network, nor vice versa
            if (internal_connection.get("type") in ("data", "ptp") or
                    internal_connection.get("implementation") == "underlay") and vnfc_interfaces[vnf][iface] == "overlay":
                raise NfvoException(
                    "Error at vnf:internal-connections[name:'{}']:elements[]:{}, interface of type {} connected to an {} network".format(
                        internal_connection["name"],
                        iface, 'bridge' if vnf_descriptor_version == 1 else 'overlay',
                        'data' if vnf_descriptor_version == 1 else 'underlay'),
                    httperrors.Bad_Request)
            if (internal_connection.get("type") == "bridge" or
                    internal_connection.get("implementation") == "overlay") and vnfc_interfaces[vnf][iface] == "underlay":
                raise NfvoException(
                    "Error at vnf:internal-connections[name:'{}']:elements[]:{}, interface of type {} connected to an {} network".format(
                        internal_connection["name"], iface,
                        'data' if vnf_descriptor_version == 1 else 'underlay',
                        'bridge' if vnf_descriptor_version == 1 else 'overlay'),
                    httperrors.Bad_Request)
+
+
def create_or_use_image(mydb, vims, image_dict, rollback_list, only_create_at_vim=False, return_on_error=None):
    """Ensure the image described by image_dict exists at the MANO DB and at every VIM of vims.

    :param mydb: database connector
    :param vims: dict of vim_id: vim connector where the image must exist
    :param image_dict: image description (name, location, universal_name, checksum, metadata, ...)
    :param rollback_list: created items are appended here so they can be undone on failure
    :param only_create_at_vim: when True, image_dict['uuid'] is the already known MANO id and only
        the VIM side is created
    :param return_on_error: when True, raise at the first VIM error instead of skipping that VIM.
        Defaults to True when only_create_at_vim
    :return: the VIM image id when only_create_at_vim, otherwise the MANO image uuid
    :raises vimconn.vimconnException: on VIM errors when return_on_error
    """
    # look if image exists at the MANO database, creating the row if needed
    if only_create_at_vim:
        image_mano_id = image_dict['uuid']
        if return_on_error is None:
            return_on_error = True
    else:
        if image_dict['location']:
            images = mydb.get_rows(FROM="images", WHERE={'location': image_dict['location'],
                                                         'metadata': image_dict['metadata']})
        else:
            images = mydb.get_rows(FROM="images", WHERE={'universal_name': image_dict['universal_name'],
                                                         'checksum': image_dict['checksum']})
        if len(images) >= 1:
            image_mano_id = images[0]['uuid']
        else:
            # create image in MANO DB
            temp_image_dict = {'name': image_dict['name'], 'description': image_dict.get('description', None),
                               'location': image_dict['location'], 'metadata': image_dict.get('metadata', None),
                               'universal_name': image_dict['universal_name'], 'checksum': image_dict['checksum']
                               }
            # temp_image_dict['location'] = image_dict.get('new_location') if image_dict['location'] is None
            image_mano_id = mydb.new_row('images', temp_image_dict, add_uuid=True)
            rollback_list.append({"where": "mano", "what": "image", "uuid": image_mano_id})
    # create image at every vim
    image_vim_id = None   # guard: the final return must not fail when vims is empty
    for vim_id, vim in vims.items():
        datacenter_vim_id = vim["config"]["datacenter_tenant_id"]
        image_created = "false"
        # current VIM mapping stored at the database, if any
        image_db = mydb.get_rows(FROM="datacenters_images",
                                 WHERE={'datacenter_vim_id': datacenter_vim_id, 'image_id': image_mano_id})
        # look at VIM if this image exists
        try:
            if image_dict['location'] is not None:
                image_vim_id = vim.get_image_id_from_path(image_dict['location'])
            else:
                filter_dict = {'name': image_dict['universal_name']}
                if image_dict.get('checksum') is not None:
                    filter_dict['checksum'] = image_dict['checksum']
                vim_images = vim.get_image_list(filter_dict)
                if len(vim_images) > 1:
                    raise vimconn.vimconnException("More than one candidate VIM image found for filter: {}".format(str(filter_dict)), httperrors.Conflict)
                elif len(vim_images) == 0:
                    raise vimconn.vimconnNotFoundException("Image not found at VIM with filter: '{}'".format(str(filter_dict)))
                else:
                    image_vim_id = vim_images[0]['id']

        except vimconn.vimconnNotFoundException as e:
            # create the image at the VIM only if image_dict['location'] is provided
            try:
                if image_dict['location']:
                    image_vim_id = vim.new_image(image_dict)
                    rollback_list.append({"where": "vim", "vim_id": vim_id, "what": "image", "uuid": image_vim_id})
                    image_created = "true"
                else:
                    # the image has only name (and optionally checksum) and could not be found: cannot be created
                    raise vimconn.vimconnException(str(e))
            except vimconn.vimconnException as e:
                if return_on_error:
                    logger.error("Error creating image at VIM '%s': %s", vim["name"], str(e))
                    raise
                image_vim_id = None
                logger.warn("Error creating image at VIM '%s': %s", vim["name"], str(e))
                continue
        except vimconn.vimconnException as e:
            if return_on_error:
                logger.error("Error contacting VIM to know if the image exists at VIM: %s", str(e))
                raise
            logger.warn("Error contacting VIM to know if the image exists at VIM: %s", str(e))
            image_vim_id = None
            continue
        # if we reach here, the image has been created or existed; sync the DB mapping
        if len(image_db) == 0:
            # add new vim_id at datacenters_images
            mydb.new_row('datacenters_images', {'datacenter_vim_id': datacenter_vim_id,
                                                'image_id': image_mano_id,
                                                'vim_id': image_vim_id,
                                                'created': image_created})
        elif image_db[0]["vim_id"] != image_vim_id:
            # modify existing vim_id at datacenters_images
            # BUGFIX: the WHERE clause used 'datacenter_vim_id': vim_id (the vims dict key) while the
            # row is selected/inserted with datacenter_vim_id, so the update never matched the row;
            # aligned with create_or_use_flavor
            mydb.update_rows('datacenters_images', UPDATE={'vim_id': image_vim_id},
                             WHERE={'datacenter_vim_id': datacenter_vim_id, 'image_id': image_mano_id})

    return image_vim_id if only_create_at_vim else image_mano_id
+
+
def create_or_use_flavor(mydb, vims, flavor_dict, rollback_list, only_create_at_vim=False, return_on_error = None):
    """Ensure the flavor described by flavor_dict exists at the MANO DB and at every VIM of vims.

    Additional-disk images referenced by 'extended':'devices' are created/resolved first via
    create_or_use_image, and the resulting disk list is stored with the datacenter flavor row.

    :param mydb: database connector
    :param vims: dict of vim_id: vim connector where the flavor must exist
    :param flavor_dict: flavor description (disk, ram, vcpus, optional 'extended' with devices);
        NOTE: mutated in place ('extended'/'uuid' keys may be removed, device 'imageRef' added)
    :param rollback_list: created items are appended here so they can be undone on failure
    :param only_create_at_vim: when True, flavor_dict['uuid'] is the already known MANO id and
        only the VIM side is created
    :param return_on_error: when True, raise at the first VIM error instead of skipping that VIM.
        Defaults to True when only_create_at_vim
    :return: the VIM flavor id when only_create_at_vim, otherwise the MANO flavor uuid
    """
    # minimal flavor signature used to look up an equivalent existing row
    temp_flavor_dict= {'disk':flavor_dict.get('disk',0),
            'ram':flavor_dict.get('ram'),
            'vcpus':flavor_dict.get('vcpus'),
        }
    if 'extended' in flavor_dict and flavor_dict['extended']==None:
        del flavor_dict['extended']
    if 'extended' in flavor_dict:
        temp_flavor_dict['extended']=yaml.safe_dump(flavor_dict['extended'],default_flow_style=True,width=256)

    #look if flavor exists at the MANO database
    if only_create_at_vim:
        flavor_mano_id = flavor_dict['uuid']
        if return_on_error == None:
            return_on_error = True
    else:
        flavors = mydb.get_rows(FROM="flavors", WHERE=temp_flavor_dict)
        if len(flavors)>=1:
            flavor_mano_id = flavors[0]['uuid']
        else:
            #create flavor
            #create one by one the images of additional disks
            dev_image_list=[] #list of images
            if 'extended' in flavor_dict and flavor_dict['extended']!=None:
                dev_nb=0
                for device in flavor_dict['extended'].get('devices',[]):
                    # only devices backed by an image need an image row
                    if "image" not in device and "image name" not in device:
                        continue
                    image_dict={}
                    image_dict['name']=device.get('image name',flavor_dict['name']+str(dev_nb)+"-img")
                    image_dict['universal_name']=device.get('image name')
                    image_dict['description']=flavor_dict['name']+str(dev_nb)+"-img"
                    image_dict['location']=device.get('image')
                    #image_dict['new_location']=vnfc.get('image location')
                    image_dict['checksum']=device.get('image checksum')
                    image_metadata_dict = device.get('image metadata', None)
                    image_metadata_str = None
                    if image_metadata_dict != None:
                        image_metadata_str = yaml.safe_dump(image_metadata_dict,default_flow_style=True,width=256)
                    image_dict['metadata']=image_metadata_str
                    image_id = create_or_use_image(mydb, vims, image_dict, rollback_list)
                    #print "Additional disk image id for VNFC %s: %s" % (flavor_dict['name']+str(dev_nb)+"-img", image_id)
                    dev_image_list.append(image_id)
                    dev_nb += 1
            temp_flavor_dict['name'] = flavor_dict['name']
            temp_flavor_dict['description'] = flavor_dict.get('description',None)
            content = mydb.new_row('flavors', temp_flavor_dict, add_uuid=True)
            flavor_mano_id= content
            rollback_list.append({"where":"mano", "what":"flavor","uuid":flavor_mano_id})
    #create flavor at every vim
    if 'uuid' in flavor_dict:
        del flavor_dict['uuid']
    flavor_vim_id=None
    for vim_id,vim in vims.items():
        datacenter_vim_id = vim["config"]["datacenter_tenant_id"]
        flavor_created="false"
        #look at database for the current VIM mapping, if any
        flavor_db = mydb.get_rows(FROM="datacenters_flavors",
                                  WHERE={'datacenter_vim_id': datacenter_vim_id, 'flavor_id': flavor_mano_id})
        #look at VIM if this flavor exists  SKIPPED
        #res_vim, flavor_vim_id = vim.get_flavor_id_from_path(flavor_dict['location'])
        #if res_vim < 0:
        #    print "Error contacting VIM to know if the flavor %s existed previously." %flavor_vim_id
        #    continue
        #elif res_vim==0:

        # Create the flavor in VIM
        # Translate images at devices from MANO id to VIM id
        disk_list = []
        if 'extended' in flavor_dict and flavor_dict['extended']!=None and "devices" in flavor_dict['extended']:
            # make a copy of original devices; the originals are stripped of image keys
            # before being sent to the VIM
            devices_original=[]

            for device in flavor_dict["extended"].get("devices",[]):
                dev={}
                dev.update(device)
                devices_original.append(dev)
                if 'image' in device:
                    del device['image']
                if 'image metadata' in device:
                    del device['image metadata']
                if 'image checksum' in device:
                    del device['image checksum']
            dev_nb = 0
            for index in range(0,len(devices_original)) :
                device=devices_original[index]
                if "image" not in device and "image name" not in device:
                    # device without image: plain volume of the given (or default) size
                    # if 'size' in device:
                    disk_list.append({'size': device.get('size', default_volume_size), 'name': device.get('name')})
                    continue
                image_dict={}
                image_dict['name']=device.get('image name',flavor_dict['name']+str(dev_nb)+"-img")
                image_dict['universal_name']=device.get('image name')
                image_dict['description']=flavor_dict['name']+str(dev_nb)+"-img"
                image_dict['location']=device.get('image')
                # image_dict['new_location']=device.get('image location')
                image_dict['checksum']=device.get('image checksum')
                image_metadata_dict = device.get('image metadata', None)
                image_metadata_str = None
                if image_metadata_dict != None:
                    image_metadata_str = yaml.safe_dump(image_metadata_dict,default_flow_style=True,width=256)
                image_dict['metadata']=image_metadata_str
                # resolve MANO image first, then ensure it exists at the VIMs to get the VIM id
                image_mano_id=create_or_use_image(mydb, vims, image_dict, rollback_list, only_create_at_vim=False, return_on_error=return_on_error )
                image_dict["uuid"]=image_mano_id
                image_vim_id=create_or_use_image(mydb, vims, image_dict, rollback_list, only_create_at_vim=True, return_on_error=return_on_error)

                #save disk information (image it must be based on, and size)
                disk_list.append({'image_id': image_vim_id, 'size': device.get('size', default_volume_size)})

                flavor_dict["extended"]["devices"][index]['imageRef']=image_vim_id
                dev_nb += 1
        if len(flavor_db)>0:
            #check that this vim_id exists in VIM, if not create
            flavor_vim_id=flavor_db[0]["vim_id"]
            try:
                vim.get_flavor(flavor_vim_id)
                continue #flavor exists
            except vimconn.vimconnException:
                pass
        #create flavor at vim
        logger.debug("nfvo.create_or_use_flavor() adding flavor to VIM %s", vim["name"])
        try:
            # first try to reuse a VIM flavor matching the requested data
            flavor_vim_id = None
            flavor_vim_id=vim.get_flavor_id_from_data(flavor_dict)
            flavor_created="false"
        except vimconn.vimconnException as e:
            pass
        try:
            if not flavor_vim_id:
                flavor_vim_id = vim.new_flavor(flavor_dict)
                rollback_list.append({"where":"vim", "vim_id": vim_id, "what":"flavor","uuid":flavor_vim_id})
                flavor_created="true"
        except vimconn.vimconnException as e:
            if return_on_error:
                logger.error("Error creating flavor at VIM %s: %s.", vim["name"], str(e))
                raise
            logger.warn("Error creating flavor at VIM %s: %s.", vim["name"], str(e))
            flavor_vim_id = None
            continue
        #if we reach here the flavor has been created or existed; sync the DB mapping
        if len(flavor_db)==0:
            #add new vim_id at datacenters_flavors
            extended_devices_yaml = None
            if len(disk_list) > 0:
                extended_devices = dict()
                extended_devices['disks'] = disk_list
                extended_devices_yaml = yaml.safe_dump(extended_devices,default_flow_style=True,width=256)
            mydb.new_row('datacenters_flavors',
                        {'datacenter_vim_id': datacenter_vim_id, 'flavor_id': flavor_mano_id, 'vim_id': flavor_vim_id,
                        'created': flavor_created, 'extended': extended_devices_yaml})
        elif flavor_db[0]["vim_id"]!=flavor_vim_id:
            #modify existing vim_id at datacenters_flavors
            mydb.update_rows('datacenters_flavors', UPDATE={'vim_id':flavor_vim_id},
                             WHERE={'datacenter_vim_id': datacenter_vim_id, 'flavor_id': flavor_mano_id})

    return flavor_vim_id if only_create_at_vim else flavor_mano_id
+
+
def get_str(obj, field, length):
    """Fetch *field* from dictionary *obj* as a string truncated to *length* characters.

    :param obj: dictionary to read from
    :param field: key to fetch
    :param length: maximum number of characters kept
    :return: the truncated string, or None when the field is absent or None
    """
    raw = obj.get(field)
    return None if raw is None else str(raw)[:length]
+
def _lookfor_or_create_image(db_image, mydb, descriptor):
    """Fill db_image with the image data of descriptor and look it up at the database.

    :param db_image: dictionary filled in place with the image fields
    :param mydb: database connector
    :param descriptor: yang descriptor with "image" and optional "image-checksum"
    :return: the uuid of an already existing DB image, or None when a new image must be
        created with the data left at db_image (a fresh "uuid" is filled in that case)
    """
    db_image["name"] = get_str(descriptor, "image", 255)
    db_image["checksum"] = get_str(descriptor, "image-checksum", 32)
    if not db_image["checksum"]:  # store None instead of an empty string
        db_image["checksum"] = None
    if db_image["name"].startswith("/"):
        # an absolute path identifies the image by location
        db_image["location"] = db_image["name"]
        matches = mydb.get_rows(FROM="images", WHERE={'location': db_image["location"]})
    else:
        # otherwise identify it by universal name plus checksum
        db_image["universal_name"] = db_image["name"]
        matches = mydb.get_rows(FROM="images", WHERE={'universal_name': db_image['universal_name'],
                                                      'checksum': db_image['checksum']})
    if matches:
        return matches[0]["uuid"]
    db_image["uuid"] = str(uuid4())
    return None
+
def get_resource_allocation_params(quota_descriptor):
    """Read a cpu/mem/vif/disk-io quota descriptor from the vnfd.

    :param quota_descriptor: dictionary possibly holding "limit", "reserve", "shares"
    :return: dict with the present, truthy fields converted to int
    """
    quota = {}
    for field in ("limit", "reserve", "shares"):
        if quota_descriptor.get(field):
            quota[field] = int(quota_descriptor[field])
    return quota
+
+def new_vnfd_v3(mydb, tenant_id, vnf_descriptor):
+    """
+    Parses an OSM IM vnfd_catalog and insert at DB
+    :param mydb:
+    :param tenant_id:
+    :param vnf_descriptor:
+    :return: The list of cretated vnf ids
+    """
+    try:
+        myvnfd = vnfd_catalog.vnfd()
+        try:
+            pybindJSONDecoder.load_ietf_json(vnf_descriptor, None, None, obj=myvnfd, path_helper=True,
+                                             skip_unknown=True)
+        except Exception as e:
+            raise NfvoException("Error. Invalid VNF descriptor format " + str(e), httperrors.Bad_Request)
+        db_vnfs = []
+        db_nets = []
+        db_vms = []
+        db_vms_index = 0
+        db_interfaces = []
+        db_images = []
+        db_flavors = []
+        db_ip_profiles_index = 0
+        db_ip_profiles = []
+        uuid_list = []
+        vnfd_uuid_list = []
+        vnfd_catalog_descriptor = vnf_descriptor.get("vnfd:vnfd-catalog")
+        if not vnfd_catalog_descriptor:
+            vnfd_catalog_descriptor = vnf_descriptor.get("vnfd-catalog")
+        vnfd_descriptor_list = vnfd_catalog_descriptor.get("vnfd")
+        if not vnfd_descriptor_list:
+            vnfd_descriptor_list = vnfd_catalog_descriptor.get("vnfd:vnfd")
+        for vnfd_yang in myvnfd.vnfd_catalog.vnfd.values():
+            vnfd = vnfd_yang.get()
+
+            # table vnf
+            vnf_uuid = str(uuid4())
+            uuid_list.append(vnf_uuid)
+            vnfd_uuid_list.append(vnf_uuid)
+            vnfd_id = get_str(vnfd, "id", 255)
+            db_vnf = {
+                "uuid": vnf_uuid,
+                "osm_id": vnfd_id,
+                "name": get_str(vnfd, "name", 255),
+                "description": get_str(vnfd, "description", 255),
+                "tenant_id": tenant_id,
+                "vendor": get_str(vnfd, "vendor", 255),
+                "short_name": get_str(vnfd, "short-name", 255),
+                "descriptor": str(vnf_descriptor)[:60000]
+            }
+
+            for vnfd_descriptor in vnfd_descriptor_list:
+                if vnfd_descriptor["id"] == str(vnfd["id"]):
+                    break
+
+            # table ip_profiles (ip-profiles)
+            ip_profile_name2db_table_index = {}
+            for ip_profile in vnfd.get("ip-profiles").values():
+                db_ip_profile = {
+                    "ip_version": str(ip_profile["ip-profile-params"].get("ip-version", "ipv4")),
+                    "subnet_address": str(ip_profile["ip-profile-params"].get("subnet-address")),
+                    "gateway_address": str(ip_profile["ip-profile-params"].get("gateway-address")),
+                    "dhcp_enabled": str(ip_profile["ip-profile-params"]["dhcp-params"].get("enabled", True)),
+                    "dhcp_start_address": str(ip_profile["ip-profile-params"]["dhcp-params"].get("start-address")),
+                    "dhcp_count": str(ip_profile["ip-profile-params"]["dhcp-params"].get("count")),
+                }
+                dns_list = []
+                for dns in ip_profile["ip-profile-params"]["dns-server"].values():
+                    dns_list.append(str(dns.get("address")))
+                db_ip_profile["dns_address"] = ";".join(dns_list)
+                if ip_profile["ip-profile-params"].get('security-group'):
+                    db_ip_profile["security_group"] = ip_profile["ip-profile-params"]['security-group']
+                ip_profile_name2db_table_index[str(ip_profile["name"])] = db_ip_profiles_index
+                db_ip_profiles_index += 1
+                db_ip_profiles.append(db_ip_profile)
+
+            # table nets (internal-vld)
+            net_id2uuid = {}  # for mapping interface with network
+            for vld in vnfd.get("internal-vld").values():
+                net_uuid = str(uuid4())
+                uuid_list.append(net_uuid)
+                db_net = {
+                    "name": get_str(vld, "name", 255),
+                    "vnf_id": vnf_uuid,
+                    "uuid": net_uuid,
+                    "description": get_str(vld, "description", 255),
+                    "osm_id": get_str(vld, "id", 255),
+                    "type": "bridge",   # TODO adjust depending on connection point type
+                }
+                net_id2uuid[vld.get("id")] = net_uuid
+                db_nets.append(db_net)
+                # ip-profile, link db_ip_profile with db_sce_net
+                if vld.get("ip-profile-ref"):
+                    ip_profile_name = vld.get("ip-profile-ref")
+                    if ip_profile_name not in ip_profile_name2db_table_index:
+                        raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{}]':'vld[{}]':'ip-profile-ref':"
+                                            "'{}'. Reference to a non-existing 'ip_profiles'".format(
+                                                str(vnfd["id"]), str(vld["id"]), str(vld["ip-profile-ref"])),
+                                            httperrors.Bad_Request)
+                    db_ip_profiles[ip_profile_name2db_table_index[ip_profile_name]]["net_id"] = net_uuid
+                else:  #check no ip-address has been defined
+                    for icp in vld.get("internal-connection-point").values():
+                        if icp.get("ip-address"):
+                            raise NfvoException("Error at 'vnfd[{}]':'vld[{}]':'internal-connection-point[{}]' "
+                                            "contains an ip-address but no ip-profile has been defined at VLD".format(
+                                                str(vnfd["id"]), str(vld["id"]), str(icp["id"])),
+                                            httperrors.Bad_Request)
+
+            # connection points vaiable declaration
+            cp_name2iface_uuid = {}
+            cp_name2vdu_id = {}
+            cp_name2vm_uuid = {}
+            cp_name2db_interface = {}
+            vdu_id2cp_name = {}  # stored only when one external connection point is presented at this VDU
+
+            # table vms (vdus)
+            vdu_id2uuid = {}
+            vdu_id2db_table_index = {}
+            mgmt_access = {}
+            for vdu in vnfd.get("vdu").values():
+
+                for vdu_descriptor in vnfd_descriptor["vdu"]:
+                    if vdu_descriptor["id"] == str(vdu["id"]):
+                        break
+                vm_uuid = str(uuid4())
+                uuid_list.append(vm_uuid)
+                vdu_id = get_str(vdu, "id", 255)
+                db_vm = {
+                    "uuid": vm_uuid,
+                    "osm_id": vdu_id,
+                    "name": get_str(vdu, "name", 255),
+                    "description": get_str(vdu, "description", 255),
+                    "pdu_type": get_str(vdu, "pdu-type", 255),
+                    "vnf_id": vnf_uuid,
+                }
+                vdu_id2uuid[db_vm["osm_id"]] = vm_uuid
+                vdu_id2db_table_index[db_vm["osm_id"]] = db_vms_index
+                if vdu.get("count"):
+                    db_vm["count"] = int(vdu["count"])
+
+                # table image
+                image_present = False
+                if vdu.get("image"):
+                    image_present = True
+                    db_image = {}
+                    image_uuid = _lookfor_or_create_image(db_image, mydb, vdu)
+                    if not image_uuid:
+                        image_uuid = db_image["uuid"]
+                        db_images.append(db_image)
+                    db_vm["image_id"] = image_uuid
+                if vdu.get("alternative-images"):
+                    vm_alternative_images = []
+                    for alt_image in vdu.get("alternative-images").values():
+                        db_image = {}
+                        image_uuid = _lookfor_or_create_image(db_image, mydb, alt_image)
+                        if not image_uuid:
+                            image_uuid = db_image["uuid"]
+                            db_images.append(db_image)
+                        vm_alternative_images.append({
+                            "image_id": image_uuid,
+                            "vim_type": str(alt_image["vim-type"]),
+                            # "universal_name": str(alt_image["image"]),
+                            # "checksum": str(alt_image["image-checksum"]) if alt_image.get("image-checksum") else None
+                        })
+
+                    db_vm["image_list"] = yaml.safe_dump(vm_alternative_images, default_flow_style=True, width=256)
+
+                # volumes
+                devices = []
+                if vdu.get("volumes"):
+                    for volume_key in vdu["volumes"]:
+                        volume = vdu["volumes"][volume_key]
+                        if not image_present:
+                            # Convert the first volume to vnfc.image
+                            image_present = True
+                            db_image = {}
+                            image_uuid = _lookfor_or_create_image(db_image, mydb, volume)
+                            if not image_uuid:
+                                image_uuid = db_image["uuid"]
+                                db_images.append(db_image)
+                            db_vm["image_id"] = image_uuid
+                        else:
+                            # Add Openmano devices
+                            device = {"name": str(volume.get("name"))}
+                            device["type"] = str(volume.get("device-type"))
+                            if volume.get("size"):
+                                device["size"] = int(volume["size"])
+                            if volume.get("image"):
+                                device["image name"] = str(volume["image"])
+                                if volume.get("image-checksum"):
+                                    device["image checksum"] = str(volume["image-checksum"])
+
+                            devices.append(device)
+
+                if not db_vm.get("image_id"):
+                    if not db_vm["pdu_type"]:
+                        raise NfvoException("Not defined image for VDU")
+                    # create a fake image
+
+                # cloud-init
+                boot_data = {}
+                if vdu.get("cloud-init"):
+                    boot_data["user-data"] = str(vdu["cloud-init"])
+                elif vdu.get("cloud-init-file"):
+                    # TODO Where this file content is present???
+                    # boot_data["user-data"] = vnfd_yang.files[vdu["cloud-init-file"]]
+                    boot_data["user-data"] = str(vdu["cloud-init-file"])
+
+                if vdu.get("supplemental-boot-data"):
+                    if vdu["supplemental-boot-data"].get('boot-data-drive'):
+                            boot_data['boot-data-drive'] = True
+                    if vdu["supplemental-boot-data"].get('config-file'):
+                        om_cfgfile_list = list()
+                        for custom_config_file in vdu["supplemental-boot-data"]['config-file'].values():
+                            # TODO Where this file content is present???
+                            cfg_source = str(custom_config_file["source"])
+                            om_cfgfile_list.append({"dest": custom_config_file["dest"],
+                                                    "content": cfg_source})
+                        boot_data['config-files'] = om_cfgfile_list
+                if boot_data:
+                    db_vm["boot_data"] = yaml.safe_dump(boot_data, default_flow_style=True, width=256)
+
+                db_vms.append(db_vm)
+                db_vms_index += 1
+
+                # table interfaces (internal/external interfaces)
+                flavor_epa_interfaces = []
+                # for iface in chain(vdu.get("internal-interface").values(), vdu.get("external-interface").values()):
+                for iface in vdu.get("interface").values():
+                    flavor_epa_interface = {}
+                    iface_uuid = str(uuid4())
+                    uuid_list.append(iface_uuid)
+                    db_interface = {
+                        "uuid": iface_uuid,
+                        "internal_name": get_str(iface, "name", 255),
+                        "vm_id": vm_uuid,
+                    }
+                    flavor_epa_interface["name"] = db_interface["internal_name"]
+                    if iface.get("virtual-interface").get("vpci"):
+                        db_interface["vpci"] = get_str(iface.get("virtual-interface"), "vpci", 12)
+                        flavor_epa_interface["vpci"] = db_interface["vpci"]
+
+                    if iface.get("virtual-interface").get("bandwidth"):
+                        bps = int(iface.get("virtual-interface").get("bandwidth"))
+                        db_interface["bw"] = int(math.ceil(bps / 1000000.0))
+                        flavor_epa_interface["bandwidth"] = "{} Mbps".format(db_interface["bw"])
+
+                    if iface.get("virtual-interface").get("type") == "OM-MGMT":
+                        db_interface["type"] = "mgmt"
+                    elif iface.get("virtual-interface").get("type") in ("VIRTIO", "E1000", "PARAVIRT"):
+                        db_interface["type"] = "bridge"
+                        db_interface["model"] = get_str(iface.get("virtual-interface"), "type", 12)
+                    elif iface.get("virtual-interface").get("type") in ("SR-IOV", "PCI-PASSTHROUGH"):
+                        db_interface["type"] = "data"
+                        db_interface["model"] = get_str(iface.get("virtual-interface"), "type", 12)
+                        flavor_epa_interface["dedicated"] = "no" if iface["virtual-interface"]["type"] == "SR-IOV" \
+                            else "yes"
+                        flavor_epa_interfaces.append(flavor_epa_interface)
+                    else:
+                        raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{}]':'vdu[{}]':'interface':'virtual"
+                                            "-interface':'type':'{}'. Interface type is not supported".format(
+                                                vnfd_id, vdu_id, iface.get("virtual-interface").get("type")),
+                                            httperrors.Bad_Request)
+
+                    if iface.get("mgmt-interface"):
+                        db_interface["type"] = "mgmt"
+
+                    if iface.get("external-connection-point-ref"):
+                        try:
+                            cp = vnfd.get("connection-point")[iface.get("external-connection-point-ref")]
+                            db_interface["external_name"] = get_str(cp, "name", 255)
+                            cp_name2iface_uuid[db_interface["external_name"]] = iface_uuid
+                            cp_name2vdu_id[db_interface["external_name"]] = vdu_id
+                            cp_name2vm_uuid[db_interface["external_name"]] = vm_uuid
+                            cp_name2db_interface[db_interface["external_name"]] = db_interface
+                            for cp_descriptor in vnfd_descriptor["connection-point"]:
+                                if cp_descriptor["name"] == db_interface["external_name"]:
+                                    break
+                            else:
+                                raise KeyError()
+
+                            if vdu_id in vdu_id2cp_name:
+                                vdu_id2cp_name[vdu_id] = None  # more than two connecdtion point for this VDU
+                            else:
+                                vdu_id2cp_name[vdu_id] = db_interface["external_name"]
+
+                            # port security
+                            if str(cp_descriptor.get("port-security-enabled")).lower() == "false":
+                                db_interface["port_security"] = 0
+                            elif str(cp_descriptor.get("port-security-enabled")).lower() == "true":
+                                db_interface["port_security"] = 1
+                        except KeyError:
+                            raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{vnf}]':'vdu[{vdu}]':"
+                                                "'interface[{iface}]':'vnfd-connection-point-ref':'{cp}' is not present"
+                                                " at connection-point".format(
+                                                    vnf=vnfd_id, vdu=vdu_id, iface=iface["name"],
+                                                    cp=iface.get("vnfd-connection-point-ref")),
+                                                httperrors.Bad_Request)
+                    elif iface.get("internal-connection-point-ref"):
+                        try:
+                            for icp_descriptor in vdu_descriptor["internal-connection-point"]:
+                                if icp_descriptor["id"] == str(iface.get("internal-connection-point-ref")):
+                                    break
+                            else:
+                                raise KeyError("does not exist at vdu:internal-connection-point")
+                            icp = None
+                            icp_vld = None
+                            for vld in vnfd.get("internal-vld").values():
+                                for cp in vld.get("internal-connection-point").values():
+                                    if cp.get("id-ref") == iface.get("internal-connection-point-ref"):
+                                        if icp:
+                                            raise KeyError("is referenced by more than one 'internal-vld'")
+                                        icp = cp
+                                        icp_vld = vld
+                            if not icp:
+                                raise KeyError("is not referenced by any 'internal-vld'")
+
+                            db_interface["net_id"] = net_id2uuid[icp_vld.get("id")]
+                            if str(icp_descriptor.get("port-security-enabled")).lower() == "false":
+                                db_interface["port_security"] = 0
+                            elif str(icp_descriptor.get("port-security-enabled")).lower() == "true":
+                                db_interface["port_security"] = 1
+                            if icp.get("ip-address"):
+                                if not icp_vld.get("ip-profile-ref"):
+                                    raise NfvoException
+                                db_interface["ip_address"] = str(icp.get("ip-address"))
+                        except KeyError as e:
+                            raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{vnf}]':'vdu[{vdu}]':"
+                                                "'interface[{iface}]':'internal-connection-point-ref':'{cp}'"
+                                                " {msg}".format(
+                                                    vnf=vnfd_id, vdu=vdu_id, iface=iface["name"],
+                                                    cp=iface.get("internal-connection-point-ref"), msg=str(e)),
+                                                httperrors.Bad_Request)
+                    if iface.get("position"):
+                        db_interface["created_at"] = int(iface.get("position")) * 50
+                    if iface.get("mac-address"):
+                        db_interface["mac"] = str(iface.get("mac-address"))
+                    db_interfaces.append(db_interface)
+
+                # table flavors
+                db_flavor = {
+                    "name": get_str(vdu, "name", 250) + "-flv",
+                    "vcpus": int(vdu["vm-flavor"].get("vcpu-count", 1)),
+                    "ram": int(vdu["vm-flavor"].get("memory-mb", 1)),
+                    "disk": int(vdu["vm-flavor"].get("storage-gb", 0)),
+                }
+                # TODO revise the case of several numa-node-policy node
+                extended = {}
+                numa = {}
+                if devices:
+                    extended["devices"] = devices
+                if flavor_epa_interfaces:
+                    numa["interfaces"] = flavor_epa_interfaces
+                if vdu.get("guest-epa"):   # TODO or dedicated_int:
+                    epa_vcpu_set = False
+                    if vdu["guest-epa"].get("numa-node-policy"):  # TODO or dedicated_int:
+                        numa_node_policy = vdu["guest-epa"].get("numa-node-policy")
+                        if numa_node_policy.get("node"):
+                            numa_node = next(iter(numa_node_policy["node"].values()))
+                            if numa_node.get("num-cores"):
+                                numa["cores"] = numa_node["num-cores"]
+                                epa_vcpu_set = True
+                            if numa_node.get("paired-threads"):
+                                if numa_node["paired-threads"].get("num-paired-threads"):
+                                    numa["paired-threads"] = int(numa_node["paired-threads"]["num-paired-threads"])
+                                    epa_vcpu_set = True
+                                if len(numa_node["paired-threads"].get("paired-thread-ids")):
+                                    numa["paired-threads-id"] = []
+                                    for pair in numa_node["paired-threads"]["paired-thread-ids"].values():
+                                        numa["paired-threads-id"].append(
+                                            (str(pair["thread-a"]), str(pair["thread-b"]))
+                                        )
+                            if numa_node.get("num-threads"):
+                                numa["threads"] = int(numa_node["num-threads"])
+                                epa_vcpu_set = True
+                            if numa_node.get("memory-mb"):
+                                numa["memory"] = max(int(numa_node["memory-mb"] / 1024), 1)
+                    if vdu["guest-epa"].get("mempage-size"):
+                        if vdu["guest-epa"]["mempage-size"] != "SMALL":
+                            numa["memory"] = max(int(db_flavor["ram"] / 1024), 1)
+                    if vdu["guest-epa"].get("cpu-pinning-policy") and not epa_vcpu_set:
+                        if vdu["guest-epa"]["cpu-pinning-policy"] == "DEDICATED":
+                            if vdu["guest-epa"].get("cpu-thread-pinning-policy") and \
+                                            vdu["guest-epa"]["cpu-thread-pinning-policy"] != "PREFER":
+                                numa["cores"] = max(db_flavor["vcpus"], 1)
+                            else:
+                                numa["threads"] = max(db_flavor["vcpus"], 1)
+                            epa_vcpu_set = True
+                    if vdu["guest-epa"].get("cpu-quota") and not epa_vcpu_set:
+                        cpuquota = get_resource_allocation_params(vdu["guest-epa"].get("cpu-quota"))
+                        if cpuquota:
+                            extended["cpu-quota"] = cpuquota
+                    if vdu["guest-epa"].get("mem-quota"):
+                        vduquota = get_resource_allocation_params(vdu["guest-epa"].get("mem-quota"))
+                        if vduquota:
+                            extended["mem-quota"] = vduquota
+                    if vdu["guest-epa"].get("disk-io-quota"):
+                        diskioquota = get_resource_allocation_params(vdu["guest-epa"].get("disk-io-quota"))
+                        if diskioquota:
+                            extended["disk-io-quota"] = diskioquota
+                    if vdu["guest-epa"].get("vif-quota"):
+                        vifquota = get_resource_allocation_params(vdu["guest-epa"].get("vif-quota"))
+                        if vifquota:
+                            extended["vif-quota"] = vifquota
+                if numa:
+                    extended["numas"] = [numa]
+                if extended:
+                    extended_text = yaml.safe_dump(extended, default_flow_style=True, width=256)
+                    db_flavor["extended"] = extended_text
+                # look if flavor exist
+                temp_flavor_dict = {'disk': db_flavor.get('disk', 0),
+                                    'ram': db_flavor.get('ram'),
+                                    'vcpus': db_flavor.get('vcpus'),
+                                    'extended': db_flavor.get('extended')
+                                    }
+                existing_flavors = mydb.get_rows(FROM="flavors", WHERE=temp_flavor_dict)
+                if existing_flavors:
+                    flavor_uuid = existing_flavors[0]["uuid"]
+                else:
+                    flavor_uuid = str(uuid4())
+                    uuid_list.append(flavor_uuid)
+                    db_flavor["uuid"] = flavor_uuid
+                    db_flavors.append(db_flavor)
+                db_vm["flavor_id"] = flavor_uuid
+
+            # VNF affinity and antiaffinity
+            for pg in vnfd.get("placement-groups").values():
+                pg_name = get_str(pg, "name", 255)
+                for vdu in pg.get("member-vdus").values():
+                    vdu_id = get_str(vdu, "member-vdu-ref", 255)
+                    if vdu_id not in vdu_id2db_table_index:
+                        raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{vnf}]':'placement-groups[{pg}]':"
+                                            "'member-vdus':'{vdu}'. Reference to a non-existing vdu".format(
+                                                vnf=vnfd_id, pg=pg_name, vdu=vdu_id),
+                                            httperrors.Bad_Request)
+                    db_vms[vdu_id2db_table_index[vdu_id]]["availability_zone"] = pg_name
+                    # TODO consider the case of isolation and not colocation
+                    # if pg.get("strategy") == "ISOLATION":
+
+            # VNF mgmt configuration
+            if vnfd["mgmt-interface"].get("vdu-id"):
+                mgmt_vdu_id = get_str(vnfd["mgmt-interface"], "vdu-id", 255)
+                if mgmt_vdu_id not in vdu_id2uuid:
+                    raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{vnf}]':'mgmt-interface':'vdu-id':"
+                                        "'{vdu}'. Reference to a non-existing vdu".format(
+                                            vnf=vnfd_id, vdu=mgmt_vdu_id),
+                                        httperrors.Bad_Request)
+                mgmt_access["vm_id"] = vdu_id2uuid[mgmt_vdu_id]
+                mgmt_access["vdu-id"] = mgmt_vdu_id
+                # if only one cp is defined by this VDU, mark this interface as of type "mgmt"
+                if vdu_id2cp_name.get(mgmt_vdu_id):
+                    if cp_name2db_interface[vdu_id2cp_name[mgmt_vdu_id]]:
+                        cp_name2db_interface[vdu_id2cp_name[mgmt_vdu_id]]["type"] = "mgmt"
+
+            if vnfd["mgmt-interface"].get("ip-address"):
+                mgmt_access["ip-address"] = str(vnfd["mgmt-interface"].get("ip-address"))
+            if vnfd["mgmt-interface"].get("cp"):
+                if vnfd["mgmt-interface"]["cp"] not in cp_name2iface_uuid:
+                    raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{vnf}]':'mgmt-interface':'cp'['{cp}']. "
+                                        "Reference to a non-existing connection-point".format(
+                                            vnf=vnfd_id, cp=vnfd["mgmt-interface"]["cp"]),
+                                        httperrors.Bad_Request)
+                mgmt_access["vm_id"] = cp_name2vm_uuid[vnfd["mgmt-interface"]["cp"]]
+                mgmt_access["interface_id"] = cp_name2iface_uuid[vnfd["mgmt-interface"]["cp"]]
+                mgmt_access["vdu-id"] = cp_name2vdu_id[vnfd["mgmt-interface"]["cp"]]
+                # mark this interface as of type mgmt
+                if cp_name2db_interface[vnfd["mgmt-interface"]["cp"]]:
+                    cp_name2db_interface[vnfd["mgmt-interface"]["cp"]]["type"] = "mgmt"
+
+            default_user = get_str(vnfd.get("vnf-configuration", {}).get("config-access", {}).get("ssh-access", {}),
+                                    "default-user", 64)
+            if default_user:
+                mgmt_access["default_user"] = default_user
+
+            required = get_str(vnfd.get("vnf-configuration", {}).get("config-access", {}).get("ssh-access", {}),
+                                   "required", 6)
+            if required:
+                mgmt_access["required"] = required
+
+            password_ = get_str(vnfd.get("vnf-configuration", {}).get("config-access", {}),
+                                   "password", 64)
+            if password_:
+                mgmt_access["password"] = password_
+
+            if mgmt_access:
+                db_vnf["mgmt_access"] = yaml.safe_dump(mgmt_access, default_flow_style=True, width=256)
+
+            db_vnfs.append(db_vnf)
+        db_tables=[
+            {"vnfs": db_vnfs},
+            {"nets": db_nets},
+            {"images": db_images},
+            {"flavors": db_flavors},
+            {"ip_profiles": db_ip_profiles},
+            {"vms": db_vms},
+            {"interfaces": db_interfaces},
+        ]
+
+        logger.debug("create_vnf Deployment done vnfDict: %s",
+                    yaml.safe_dump(db_tables, indent=4, default_flow_style=False) )
+        mydb.new_rows(db_tables, uuid_list)
+        return vnfd_uuid_list
+    except NfvoException:
+        raise
+    except Exception as e:
+        logger.error("Exception {}".format(e))
+        raise  # NfvoException("Exception {}".format(e), httperrors.Bad_Request)
+
+
@deprecated("Use new_vnfd_v3")
def new_vnf(mydb, tenant_id, vnf_descriptor):
    """Insert a VNF described with the v0.1 descriptor schema into the NFVO database.

    Flavors and images for every VNFC are created (or reused) and, when
    global_config["auto_push_VNF_to_VIMs"] is set, pushed to the tenant VIMs.
    On any failure everything created at the VIMs is rolled back.

    :param mydb: NFVO database connector
    :param tenant_id: owner tenant uuid, or "any" to skip tenant checks and VIM push
    :param vnf_descriptor: dict containing the VNF v1 descriptor under the "vnf" key
    :return: uuid of the VNF created in the database
    :raises NfvoException: on invalid descriptor, wrong tenant owner, database or VIM errors
    """
    global global_config

    # Step 1. Check the VNF descriptor
    check_vnf_descriptor(vnf_descriptor, vnf_descriptor_version=1)
    # Step 2. Check tenant exist
    vims = {}
    if tenant_id != "any":
        check_tenant(mydb, tenant_id)
        if "tenant_id" in vnf_descriptor["vnf"]:
            if vnf_descriptor["vnf"]["tenant_id"] != tenant_id:
                raise NfvoException("VNF can not have a different tenant owner '{}', must be '{}'".format(
                                        vnf_descriptor["vnf"]["tenant_id"], tenant_id),
                                    httperrors.Unauthorized)
        else:
            vnf_descriptor['vnf']['tenant_id'] = tenant_id
        # Step 3. Get the URL of the VIM from the nfvo_tenant and the datacenter
        if global_config["auto_push_VNF_to_VIMs"]:
            vims = get_vim(mydb, tenant_id, ignore_errors=True)

    # Step 4. Review the descriptor and add missing fields
    vnf_name = vnf_descriptor['vnf']['name']
    vnf_descriptor['vnf']['description'] = vnf_descriptor['vnf'].get("description", vnf_name)
    if "physical" in vnf_descriptor['vnf']:
        del vnf_descriptor['vnf']['physical']

    # Step 6. For each VNFC in the descriptor, flavors and images are created in the VIM
    logger.debug('BEGIN creation of VNF "%s"', vnf_name)
    logger.debug("VNF %s: consisting of %d VNFC(s)", vnf_name, len(vnf_descriptor['vnf']['VNFC']))

    # VNFCDict key: VNFC name; value: dict with the relevant information to create the VNF and VMs
    # in the MANO database
    VNFCDict = {}
    rollback_list = []  # new images created in mano; used for rollback on failure
    try:
        logger.debug("Creating additional disk images and new flavors in the VIM for each VNFC")
        for vnfc in vnf_descriptor['vnf']['VNFC']:
            VNFCitem = {
                "name": vnfc['name'],
                "availability_zone": vnfc.get('availability_zone'),
                "description": vnfc.get("description", 'VM {} of the VNF {}'.format(vnfc['name'], vnf_name)),
            }

            myflavorDict = {
                # Maybe we could rename the flavor by using the field "image name" if it exists
                "name": vnfc['name'] + "-flv",
                "description": VNFCitem["description"],
                "ram": vnfc.get("ram", 0),
                "vcpus": vnfc.get("vcpus", 0),
                "disk": vnfc.get("disk", 0),
                "extended": {},
            }

            devices = vnfc.get("devices")
            if devices is not None:
                myflavorDict["extended"]["devices"] = devices

            # TODO: mapping from processor models to rankings should be available somehow in the NFVO
            # (taken from the VIM or from a dedicated database table). Another option is that the
            # processor in the VNF descriptor specifies directly the ranking of the host.
            myflavorDict['extended']['processor_ranking'] = 100  # hardcoded until the mapping is decided

            if vnfc.get('numas'):
                myflavorDict['extended']['numas'] = vnfc['numas']

            # Step 6.2 New flavors are created in the VIM
            flavor_id = create_or_use_flavor(mydb, vims, myflavorDict, rollback_list)

            VNFCitem["flavor_id"] = flavor_id
            VNFCDict[vnfc['name']] = VNFCitem

        logger.debug("Creating new images in the VIM for each VNFC")
        # Step 6.3 New images are created in the VIM.
        # This loop could be merged with the previous one (VNFCDict would become a list).
        for vnfc in vnf_descriptor['vnf']['VNFC']:
            image_dict = {
                'name': vnfc.get('image name', vnf_name + "-" + vnfc['name'] + "-img"),
                'universal_name': vnfc.get('image name'),
                'description': vnfc.get('image name', VNFCDict[vnfc['name']]['description']),
                'location': vnfc.get('VNFC image'),
                'checksum': vnfc.get('image checksum'),
            }
            image_metadata_dict = vnfc.get('image metadata', None)
            image_metadata_str = None
            if image_metadata_dict is not None:
                image_metadata_str = yaml.safe_dump(image_metadata_dict, default_flow_style=True, width=256)
            image_dict['metadata'] = image_metadata_str
            image_id = create_or_use_image(mydb, vims, image_dict, rollback_list)
            VNFCDict[vnfc['name']]["image_id"] = image_id
            VNFCDict[vnfc['name']]["image_path"] = vnfc.get('VNFC image')
            VNFCDict[vnfc['name']]["count"] = vnfc.get('count', 1)
            if vnfc.get("boot-data"):
                VNFCDict[vnfc['name']]["boot_data"] = yaml.safe_dump(vnfc["boot-data"],
                                                                     default_flow_style=True, width=256)

        # Step 7. Storing the VNF descriptor in the repository
        if "descriptor" not in vnf_descriptor["vnf"]:
            vnf_descriptor["vnf"]["descriptor"] = yaml.safe_dump(vnf_descriptor, indent=4, explicit_start=True,
                                                                 default_flow_style=False)

        # Step 8. Adding the VNF to the NFVO DB
        vnf_id = mydb.new_vnf_as_a_whole(tenant_id, vnf_name, vnf_descriptor, VNFCDict)
        return vnf_id
    except (db_base_Exception, vimconn.vimconnException, KeyError) as e:
        # undo whatever was created at the VIMs before propagating the error
        _, message = rollback(mydb, vims, rollback_list)
        if isinstance(e, db_base_Exception):
            error_text = "Exception at database"
        elif isinstance(e, KeyError):
            error_text = "KeyError exception "
            e.http_code = httperrors.Internal_Server_Error
        else:
            error_text = "Exception at VIM"
        error_text += " {} {}. {}".format(type(e).__name__, str(e), message)
        raise NfvoException(error_text, e.http_code)
+
+
+@deprecated("Use new_vnfd_v3")
+def new_vnf_v02(mydb, tenant_id, vnf_descriptor):
+    global global_config
+
+    # Step 1. Check the VNF descriptor
+    check_vnf_descriptor(vnf_descriptor, vnf_descriptor_version=2)
+    # Step 2. Check tenant exist
+    vims = {}
+    if tenant_id != "any":
+        check_tenant(mydb, tenant_id)
+        if "tenant_id" in vnf_descriptor["vnf"]:
+            if vnf_descriptor["vnf"]["tenant_id"] != tenant_id:
+                raise NfvoException("VNF can not have a different tenant owner '{}', must be '{}'".format(vnf_descriptor["vnf"]["tenant_id"], tenant_id),
+                                    httperrors.Unauthorized)
+        else:
+            vnf_descriptor['vnf']['tenant_id'] = tenant_id
+        # Step 3. Get the URL of the VIM from the nfvo_tenant and the datacenter
+        if global_config["auto_push_VNF_to_VIMs"]:
+            vims = get_vim(mydb, tenant_id, ignore_errors=True)
+
+    # Step 4. Review the descriptor and add missing  fields
+    #print vnf_descriptor
+    #logger.debug("Refactoring VNF descriptor with fields: description, public (default: true)")
+    vnf_name = vnf_descriptor['vnf']['name']
+    vnf_descriptor['vnf']['description'] = vnf_descriptor['vnf'].get("description", vnf_name)
+    if "physical" in vnf_descriptor['vnf']:
+        del vnf_descriptor['vnf']['physical']
+    #print vnf_descriptor
+
+    # Step 6. For each VNFC in the descriptor, flavors and images are created in the VIM
+    logger.debug('BEGIN creation of VNF "%s"' % vnf_name)
+    logger.debug("VNF %s: consisting of %d VNFC(s)" % (vnf_name,len(vnf_descriptor['vnf']['VNFC'])))
+
+    #For each VNFC, we add it to the VNFCDict and we  create a flavor.
+    VNFCDict = {}     # Dictionary, key: VNFC name, value: dict with the relevant information to create the VNF and VMs in the MANO database
+    rollback_list = []    # It will contain the new images created in mano. It is used for rollback
+    try:
+        logger.debug("Creating additional disk images and new flavors in the VIM for each VNFC")
+        for vnfc in vnf_descriptor['vnf']['VNFC']:
+            VNFCitem={}
+            VNFCitem["name"] = vnfc['name']
+            VNFCitem["description"] = vnfc.get("description", 'VM {} of the VNF {}'.format(vnfc['name'],vnf_name))
+
+            #print "Flavor name: %s. Description: %s" % (VNFCitem["name"]+"-flv", VNFCitem["description"])
+
+            myflavorDict = {}
+            myflavorDict["name"] = vnfc['name']+"-flv"   #Maybe we could rename the flavor by using the field "image name" if exists
+            myflavorDict["description"] = VNFCitem["description"]
+            myflavorDict["ram"] = vnfc.get("ram", 0)
+            myflavorDict["vcpus"] = vnfc.get("vcpus", 0)
+            myflavorDict["disk"] = vnfc.get("disk", 0)
+            myflavorDict["extended"] = {}
+
+            devices = vnfc.get("devices")
+            if devices != None:
+                myflavorDict["extended"]["devices"] = devices
+
+            # TODO:
+            # Mapping from processor models to rankings should be available somehow in the NFVO. They could be taken from VIM or directly from a new database table
+            # Another option is that the processor in the VNF descriptor specifies directly the ranking of the host
+
+            # Previous code has been commented
+            #if vnfc['processor']['model'] == "Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz" :
+            #    myflavorDict["flavor"]['extended']['processor_ranking'] = 200
+            #elif vnfc['processor']['model'] == "Intel(R) Xeon(R) CPU E5-2697 v2 @ 2.70GHz" :
+            #    myflavorDict["flavor"]['extended']['processor_ranking'] = 300
+            #else:
+            #    result2, message = rollback(myvim, myvimURL, myvim_tenant, flavorList, imageList)
+            #    if result2:
+            #        print "Error creating flavor: unknown processor model. Rollback successful."
+            #        return -httperrors.Bad_Request, "Error creating flavor: unknown processor model. Rollback successful."
+            #    else:
+            #        return -httperrors.Bad_Request, "Error creating flavor: unknown processor model. Rollback fail: you need to access VIM and delete the following %s" % message
+            myflavorDict['extended']['processor_ranking'] = 100  #Hardcoded value, while we decide when the mapping is done
+
+            if 'numas' in vnfc and len(vnfc['numas'])>0:
+                myflavorDict['extended']['numas'] = vnfc['numas']
+
+            #print myflavorDict
+
+            # Step 6.2 New flavors are created in the VIM
+            flavor_id = create_or_use_flavor(mydb, vims, myflavorDict, rollback_list)
+
+            #print "Flavor id for VNFC %s: %s" % (vnfc['name'],flavor_id)
+            VNFCitem["flavor_id"] = flavor_id
+            VNFCDict[vnfc['name']] = VNFCitem
+
+        logger.debug("Creating new images in the VIM for each VNFC")
+        # Step 6.3 New images are created in the VIM
+        #For each VNFC, we must create the appropriate image.
+        #This "for" loop might be integrated with the previous one
+        #In case this integration is made, the VNFCDict might become a VNFClist.
+        for vnfc in vnf_descriptor['vnf']['VNFC']:
+            #print "Image name: %s. Description: %s" % (vnfc['name']+"-img", VNFCDict[vnfc['name']]['description'])
+            image_dict={}
+            image_dict['name']=vnfc.get('image name',vnf_name+"-"+vnfc['name']+"-img")
+            image_dict['universal_name']=vnfc.get('image name')
+            image_dict['description']=vnfc.get('image name', VNFCDict[vnfc['name']]['description'])
+            image_dict['location']=vnfc.get('VNFC image')
+            #image_dict['new_location']=vnfc.get('image location')
+            image_dict['checksum']=vnfc.get('image checksum')
+            image_metadata_dict = vnfc.get('image metadata', None)
+            image_metadata_str = None
+            if image_metadata_dict is not None:
+                image_metadata_str = yaml.safe_dump(image_metadata_dict,default_flow_style=True,width=256)
+            image_dict['metadata']=image_metadata_str
+            #print "create_or_use_image", mydb, vims, image_dict, rollback_list
+            image_id = create_or_use_image(mydb, vims, image_dict, rollback_list)
+            #print "Image id for VNFC %s: %s" % (vnfc['name'],image_id)
+            VNFCDict[vnfc['name']]["image_id"] = image_id
+            VNFCDict[vnfc['name']]["image_path"] = vnfc.get('VNFC image')
+            VNFCDict[vnfc['name']]["count"] = vnfc.get('count', 1)
+            if vnfc.get("boot-data"):
+                VNFCDict[vnfc['name']]["boot_data"] = yaml.safe_dump(vnfc["boot-data"], default_flow_style=True, width=256)
+
+        # Step 7. Storing the VNF descriptor in the repository
+        if "descriptor" not in vnf_descriptor["vnf"]:
+            vnf_descriptor["vnf"]["descriptor"] = yaml.safe_dump(vnf_descriptor, indent=4, explicit_start=True, default_flow_style=False)
+
+        # Step 8. Adding the VNF to the NFVO DB
+        vnf_id = mydb.new_vnf_as_a_whole2(tenant_id,vnf_name,vnf_descriptor,VNFCDict)
+        return vnf_id
+    except (db_base_Exception, vimconn.vimconnException, KeyError) as e:
+        _, message = rollback(mydb, vims, rollback_list)
+        if isinstance(e, db_base_Exception):
+            error_text = "Exception at database"
+        elif isinstance(e, KeyError):
+            error_text = "KeyError exception "
+            e.http_code = httperrors.Internal_Server_Error
+        else:
+            error_text = "Exception at VIM"
+        error_text += " {} {}. {}".format(type(e).__name__, str(e), message)
+        #logger.error("start_scenario %s", error_text)
+        raise NfvoException(error_text, e.http_code)
+
+
def get_vnf_id(mydb, tenant_id, vnf_id):
    """Retrieve the full description of a VNF from the NFVO database.

    Looks the VNF up by uuid or name (restricted to VNFs owned by
    'tenant_id' or public ones, unless tenant_id is "any") and returns a
    dict {'vnf': {...}} including its VNFCs, its internal nets with their
    ip-profiles, and its external connection interfaces.

    Raises NfvoException (directly or via the database layer) when the
    tenant or the VNF cannot be found, or when a net has more than one
    ip-profile.
    """
    # The tenant must exist before any VNF data is exposed.
    check_tenant(mydb, tenant_id)

    # Ownership filter: the VNF must belong to the tenant or be public.
    ownership = {}
    if tenant_id != "any":
        ownership["tenant_id"] = tenant_id
        ownership["public"] = True
    vnf = mydb.get_table_by_uuid_name('vnfs', vnf_id, "VNF", WHERE_OR=ownership, WHERE_AND_OR="AND")
    vnf_id = vnf["uuid"]

    # Export only the public fields of the vnfs row.
    exported = ('uuid', 'name', 'description', 'public', "tenant_id", "osm_id", "created_at")
    data = {'vnf': {key: value for key, value in vnf.items() if key in exported}}

    # VNFCs: one row per VM that composes the VNF.
    vms = mydb.get_rows(FROM='vnfs join vms on vnfs.uuid=vms.vnf_id',
                        SELECT=('vms.uuid as uuid', 'vms.osm_id as osm_id', 'vms.name as name',
                                'vms.description as description', 'boot_data'),
                        WHERE={'vnfs.uuid': vnf_id})
    if vms:
        for vm in vms:
            # boot_data is stored as yaml text; expose it parsed, under its
            # external name "boot-data".
            if vm.get("boot_data"):
                vm["boot-data"] = yaml.safe_load(vm["boot_data"])
                del vm["boot_data"]
        data['vnf']['VNFC'] = vms
    # TODO: GET all the information from a VNFC and include it in the output.

    # Internal networks of the VNF.
    nets = mydb.get_rows(FROM='vnfs join nets on vnfs.uuid=nets.vnf_id',
                         SELECT=('nets.uuid as uuid', 'nets.name as name', 'nets.description as description',
                                 'nets.type as type', 'nets.multipoint as multipoint'),
                         WHERE={'vnfs.uuid': vnf_id})
    data['vnf']['nets'] = nets

    # Attach the ip-profile (at most one is allowed) of each net.
    for net in nets:
        profiles = mydb.get_rows(FROM='ip_profiles',
                                 SELECT=('ip_version', 'subnet_address', 'gateway_address', 'dns_address',
                                         'dhcp_enabled', 'dhcp_start_address', 'dhcp_count'),
                                 WHERE={'net_id': net["uuid"]})
        if len(profiles) == 1:
            net["ip_profile"] = profiles[0]
        elif len(profiles) > 1:
            raise NfvoException("More than one ip-profile found with this criteria: net_id='{}'".format(net['uuid']), httperrors.Bad_Request)

    # TODO: For each net, GET its elements and relevant info per element (VNFC, iface, ip_address) and include them in the output.

    # External interfaces of the VNF (those with an external_name set).
    externals = mydb.get_rows(FROM='vnfs join vms on vnfs.uuid=vms.vnf_id join interfaces on vms.uuid=interfaces.vm_id',
                              SELECT=('interfaces.uuid as uuid', 'interfaces.external_name as external_name',
                                      'vms.name as vm_name', 'interfaces.vm_id as vm_id',
                                      'interfaces.internal_name as internal_name', 'interfaces.type as type',
                                      'interfaces.vpci as vpci', 'interfaces.bw as bw'),
                              WHERE={'vnfs.uuid': vnf_id, 'interfaces.external_name<>': None})
    data['vnf']['external-connections'] = externals

    return data
+
+
def delete_vnf(mydb,tenant_id,vnf_id,datacenter=None,vim_tenant=None):
    """Delete a VNF from the NFVO database and clean up its flavors and images.

    The VNF row is removed first (cascading to its VMs), then each flavor and
    image that is no longer referenced by any other VNF is deleted, both from
    the VIMs where this process created it and from the database.

    :param mydb: database access object
    :param tenant_id: tenant uuid, or "any" to skip ownership checks
    :param vnf_id: VNF uuid or name
    :param datacenter: unused in the visible code path
    :param vim_tenant: unused in the visible code path
    :return: "<uuid> <name>" string of the deleted VNF
    :raises NfvoException: if the VNF is not found
    """
    # Check tenant exist
    if tenant_id != "any":
        check_tenant(mydb, tenant_id)
        # Get the URL of the VIM from the nfvo_tenant and the datacenter
        vims = get_vim(mydb, tenant_id, ignore_errors=True)
    else:
        # no tenant: no VIM cleanup is attempted, only database rows are removed
        vims={}

    # Checking if it is a valid uuid and, if not, getting the uuid assuming that the name was provided"
    where_or = {}
    if tenant_id != "any":
        where_or["tenant_id"] = tenant_id
        where_or["public"] = True
    vnf = mydb.get_table_by_uuid_name('vnfs', vnf_id, "VNF", WHERE_OR=where_or, WHERE_AND_OR="AND")
    vnf_id = vnf["uuid"]

    # "Getting the list of flavors and tenants of the VNF"
    flavorList = get_flavorlist(mydb, vnf_id)
    if len(flavorList)==0:
        logger.warn("delete_vnf error. No flavors found for the VNF id '%s'", vnf_id)

    imageList = get_imagelist(mydb, vnf_id)
    if len(imageList)==0:
        logger.warn( "delete_vnf error. No images found for the VNF id '%s'", vnf_id)

    # Delete the VNF row first; the flavor/image cleanup below relies on the
    # vms rows being gone so the "used by other vnf" checks are accurate.
    deleted = mydb.delete_row_by_id('vnfs', vnf_id)
    if deleted == 0:
        raise NfvoException("vnf '{}' not found".format(vnf_id), httperrors.Not_Found)

    # Collects items that could not be removed from a VIM; currently only
    # logged (see the commented-out return at the end of the function).
    undeletedItems = []
    for flavor in flavorList:
        #check if flavor is used by other vnf
        try:
            c = mydb.get_rows(FROM='vms', WHERE={'flavor_id':flavor} )
            if len(c) > 0:
                logger.debug("Flavor '%s' not deleted because it is being used by another VNF", flavor)
                continue
            #flavor not used, must be deleted
            #delelte at VIM
            c = mydb.get_rows(FROM='datacenters_flavors', WHERE={'flavor_id': flavor})
            for flavor_vim in c:
                if not flavor_vim['created']:  # skip this flavor because not created by openmano
                    continue
                # look for vim
                myvim = None
                for vim in vims.values():
                    if vim["config"]["datacenter_tenant_id"] == flavor_vim["datacenter_vim_id"]:
                        myvim = vim
                        break
                if not myvim:
                    continue
                try:
                    myvim.delete_flavor(flavor_vim["vim_id"])
                except vimconn.vimconnNotFoundException:
                    # already gone at the VIM: not an error, just note it
                    logger.warn("VIM flavor %s not exist at datacenter %s", flavor_vim["vim_id"],
                                flavor_vim["datacenter_vim_id"] )
                except vimconn.vimconnException as e:
                    logger.error("Not possible to delete VIM flavor %s from datacenter %s: %s %s",
                            flavor_vim["vim_id"], flavor_vim["datacenter_vim_id"], type(e).__name__, str(e))
                    undeletedItems.append("flavor {} from VIM {}".format(flavor_vim["vim_id"],
                                                                         flavor_vim["datacenter_vim_id"]))
            # delete flavor from Database, using table flavors and with cascade foreign key also at datacenters_flavors
            mydb.delete_row_by_id('flavors', flavor)
        except db_base_Exception as e:
            logger.error("delete_vnf_error. Not possible to get flavor details and delete '%s'. %s", flavor, str(e))
            undeletedItems.append("flavor {}".format(flavor))


    for image in imageList:
        try:
            #check if image is used by other vnf
            c = mydb.get_rows(FROM='vms', WHERE=[{'image_id': image}, {'image_list LIKE ': '%' + image + '%'}])
            if len(c) > 0:
                logger.debug("Image '%s' not deleted because it is being used by another VNF", image)
                continue
            #image not used, must be deleted
            #delelte at VIM
            c = mydb.get_rows(FROM='datacenters_images', WHERE={'image_id':image})
            for image_vim in c:
                # NOTE(review): membership is tested on "datacenter_vim_id" but
                # the vim is then indexed by "datacenter_id" — unlike the flavor
                # loop above, which matches on config datacenter_tenant_id.
                # Confirm against the vims dict keys before relying on this path.
                if image_vim["datacenter_vim_id"] not in vims:   # TODO change to datacenter_tenant_id
                    continue
                # NOTE(review): string comparison with 'false' here vs. the
                # truthiness test used for flavors above — presumably the DB
                # column holds text; verify the schema.
                if image_vim['created']=='false': #skip this image because not created by openmano
                    continue
                myvim=vims[ image_vim["datacenter_id"] ]
                try:
                    myvim.delete_image(image_vim["vim_id"])
                except vimconn.vimconnNotFoundException as e:
                    logger.warn("VIM image %s not exist at datacenter %s", image_vim["vim_id"], image_vim["datacenter_id"] )
                except vimconn.vimconnException as e:
                    logger.error("Not possible to delete VIM image %s from datacenter %s: %s %s",
                            image_vim["vim_id"], image_vim["datacenter_id"], type(e).__name__, str(e))
                    undeletedItems.append("image {} from VIM {}".format(image_vim["vim_id"], image_vim["datacenter_id"] ))
            #delete image from Database, using table images and with cascade foreign key also at datacenters_images
            mydb.delete_row_by_id('images', image)
        except db_base_Exception as e:
            logger.error("delete_vnf_error. Not possible to get image details and delete '%s'. %s", image, str(e))
            undeletedItems.append("image {}".format(image))

    return vnf_id + " " + vnf["name"]
    #if undeletedItems:
    #    return "delete_vnf. Undeleted: %s" %(undeletedItems)
+
+
@deprecated("Not used")
def get_hosts_info(mydb, nfvo_tenant_id, datacenter_name=None):
    """Legacy helper: return (result_code, topology) for one datacenter's hosts.

    NOTE(review): this deprecated function still unpacks get_vim() as the old
    'result, vims' tuple, while get_vim as called elsewhere in this module
    (e.g. get_hosts) returns a dict of vims and raises NfvoException on error
    — confirm the get_vim contract before reviving this code path.
    """
    result, vims = get_vim(mydb, nfvo_tenant_id, None, datacenter_name)
    if result < 0:
        return result, vims
    elif result == 0:
        return -httperrors.Not_Found, "datacenter '{}' not found".format(datacenter_name)
    # pick the single vim of the returned mapping
    myvim = next(iter(vims.values()))
    result,servers =  myvim.get_hosts_info()
    if result < 0:
        return result, servers
    topology = {'name':myvim['name'] , 'servers': servers}
    return result, topology
+
+
def get_hosts(mydb, nfvo_tenant_id):
    """Build a per-datacenter summary of the compute hosts and their VMs.

    Requires the tenant to have exactly one datacenter attached; queries the
    VIM for its host list and maps each VIM VM id back to the internal model
    name stored in the database.

    :return: {'Datacenters': [{'name': ..., 'servers': [...]}]}
    :raises NfvoException: if no (or more than one) datacenter is found, or
        if the VIM query fails.
    """
    vims = get_vim(mydb, nfvo_tenant_id)
    # exactly one datacenter must be resolvable for this tenant
    if not vims:
        raise NfvoException("No datacenter found for tenant '{}'".format(str(nfvo_tenant_id)), httperrors.Not_Found)
    if len(vims) > 1:
        raise NfvoException("More than one datacenters found, try to identify with uuid", httperrors.Conflict)
    myvim = next(iter(vims.values()))
    try:
        hosts = myvim.get_hosts()
        logger.debug('VIM hosts response: '+ yaml.safe_dump(hosts, indent=4, default_flow_style=False))

        servers = []
        datacenter = {'Datacenters': [{'name': myvim['name'], 'servers': servers}]}
        for host in hosts:
            vm_entries = []
            for vm in host['instances']:
                # translate the VIM vm id into the internal name/model
                try:
                    rows = mydb.get_rows(SELECT=('name',),
                                         FROM='instance_vms as iv join vms on iv.vm_id=vms.uuid',
                                         WHERE={'vim_vm_id': vm['id']})
                except db_base_Exception as e:
                    logger.warn("nfvo.get_hosts virtual machine at VIM '{}' error {}".format(vm['id'], str(e)))
                    continue
                if not rows:
                    logger.warn("nfvo.get_hosts virtual machine at VIM '{}' not found at tidnfvo".format(vm['id']))
                    continue
                vm_entries.append({'name': vm['name'], 'model': rows[0]['name']})
            servers.append({'name': host['name'], 'vms': vm_entries})
        return datacenter
    except vimconn.vimconnException as e:
        raise NfvoException("Not possible to get_host_list from VIM: {}".format(str(e)), e.http_code)
+
+
@deprecated("Use new_nsd_v3")
def new_scenario(mydb, tenant_id, topo):
    """Create a scenario from a legacy v0.1 'topology' descriptor.

    Parses 'topo', resolves the referenced VNFs against the database,
    consolidates the declared connection pairs into networks, attaches any
    still-unconnected 'mgmt' interfaces to the management net, and stores the
    result with mydb.new_scenario().

    :param mydb: database access object
    :param tenant_id: tenant uuid, or "any" (then no ownership is enforced)
    :param topo: descriptor dict with 'topology':{'nodes', 'connections'}
    :return: the value returned by mydb.new_scenario (the scenario id)
    :raises NfvoException: on ownership mismatch, unknown VNFs/interfaces or
        malformed topology.
    """

#    result, vims = get_vim(mydb, tenant_id)
#    if result < 0:
#        return result, vims
#1: parse input
    # Ownership check: the descriptor may not claim a different tenant.
    if tenant_id != "any":
        check_tenant(mydb, tenant_id)
        if "tenant_id" in topo:
            if topo["tenant_id"] != tenant_id:
                raise NfvoException("VNF can not have a different tenant owner '{}', must be '{}'".format(topo["tenant_id"], tenant_id),
                                    httperrors.Unauthorized)
    else:
        tenant_id=None

#1.1: get VNFs and external_networks (other_nets).
    # Split the topology nodes into VNFs and networks (external or not).
    vnfs={}
    other_nets={}  #external_networks, bridge_networks and data_networkds
    nodes = topo['topology']['nodes']
    for k in nodes.keys():
        if nodes[k]['type'] == 'VNF':
            vnfs[k] = nodes[k]
            vnfs[k]['ifaces'] = {}
        elif nodes[k]['type'] == 'other_network' or nodes[k]['type'] == 'external_network':
            other_nets[k] = nodes[k]
            other_nets[k]['external']=True
        elif nodes[k]['type'] == 'network':
            other_nets[k] = nodes[k]
            other_nets[k]['external']=False


#1.2: Check that VNF are present at database table vnfs. Insert uuid, description and external interfaces
    for name,vnf in vnfs.items():
        # the VNF must be owned by the tenant or be public
        where = {"OR": {"tenant_id": tenant_id, 'public': "true"}}
        error_text = ""
        error_pos = "'topology':'nodes':'" + name + "'"
        if 'vnf_id' in vnf:
            error_text += " 'vnf_id' " +  vnf['vnf_id']
            where['uuid'] = vnf['vnf_id']
        if 'VNF model' in vnf:
            error_text += " 'VNF model' " +  vnf['VNF model']
            where['name'] = vnf['VNF model']
        # len(where)==1 means neither 'vnf_id' nor 'VNF model' was provided
        if len(where) == 1:
            raise NfvoException("Descriptor need a 'vnf_id' or 'VNF model' field at " + error_pos, httperrors.Bad_Request)

        vnf_db = mydb.get_rows(SELECT=('uuid','name','description'),
                               FROM='vnfs',
                               WHERE=where)
        if len(vnf_db)==0:
            raise NfvoException("unknown" + error_text + " at " + error_pos, httperrors.Not_Found)
        elif len(vnf_db)>1:
            raise NfvoException("more than one" + error_text + " at " + error_pos + " Concrete with 'vnf_id'", httperrors.Conflict)
        vnf['uuid']=vnf_db[0]['uuid']
        vnf['description']=vnf_db[0]['description']
        #get external interfaces
        ext_ifaces = mydb.get_rows(SELECT=('external_name as name','i.uuid as iface_uuid', 'i.type as type'),
            FROM='vnfs join vms on vnfs.uuid=vms.vnf_id join interfaces as i on vms.uuid=i.vm_id',
            WHERE={'vnfs.uuid':vnf['uuid'], 'external_name<>': None} )
        for ext_iface in ext_ifaces:
            vnf['ifaces'][ ext_iface['name'] ] = {'uuid':ext_iface['iface_uuid'], 'type':ext_iface['type']}

#1.4 get list of connections
    conections = topo['topology']['connections']
    conections_list = []
    conections_list_name = []
    for k in conections.keys():
        # NOTE(review): ifaces_list stays unbound if 'nodes' is neither a dict
        # nor a list — presumably descriptors are schema-validated upstream;
        # confirm before relying on this.
        if type(conections[k]['nodes'])==dict: #dict with node:iface pairs
            ifaces_list = conections[k]['nodes'].items()
        elif type(conections[k]['nodes'])==list: #list with dictionary
            ifaces_list=[]
            conection_pair_list = map(lambda x: x.items(), conections[k]['nodes'] )
            for k2 in conection_pair_list:
                ifaces_list += k2

        con_type = conections[k].get("type", "link")
        if con_type != "link":
            # a non-link connection is itself a network node
            if k in other_nets:
                raise NfvoException("Format error. Reapeted network name at 'topology':'connections':'{}'".format(str(k)), httperrors.Bad_Request)
            other_nets[k] = {'external': False}
            if conections[k].get("graph"):
                other_nets[k]["graph"] =   conections[k]["graph"]
            ifaces_list.append( (k, None) )


        if con_type == "external_network":
            other_nets[k]['external'] = True
            if conections[k].get("model"):
                other_nets[k]["model"] =   conections[k]["model"]
            else:
                other_nets[k]["model"] =   k
        if con_type == "dataplane_net" or con_type == "bridge_net":
            other_nets[k]["model"] = con_type

        conections_list_name.append(k)
        conections_list.append(set(ifaces_list)) #from list to set to operate as a set (this conversion removes elements that are repeated in a list)
        #print set(ifaces_list)
    #check valid VNF and iface names
        for iface in ifaces_list:
            if iface[0] not in vnfs and iface[0] not in other_nets :
                raise NfvoException("format error. Invalid VNF name at 'topology':'connections':'{}':'nodes':'{}'".format(
                                                                                        str(k), iface[0]), httperrors.Not_Found)
            if iface[0] in vnfs and iface[1] not in vnfs[ iface[0] ]['ifaces']:
                raise NfvoException("format error. Invalid interface name at 'topology':'connections':'{}':'nodes':'{}':'{}'".format(
                                                                                        str(k), iface[0], iface[1]), httperrors.Not_Found)

#1.5 unify connections from the pair list to a consolidated list
    # Merge connection sets that share an interface (transitive closure);
    # the inner while deletes merged entries in place, hence index arithmetic.
    index=0
    while index < len(conections_list):
        index2 = index+1
        while index2 < len(conections_list):
            if len(conections_list[index] & conections_list[index2])>0: #common interface, join nets
                conections_list[index] |= conections_list[index2]
                del conections_list[index2]
                del conections_list_name[index2]
            else:
                index2 += 1
        conections_list[index] = list(conections_list[index])  # from set to list again
        index += 1
    #for k in conections_list:
    #    print k



#1.6 Delete non external nets
#    for k in other_nets.keys():
#        if other_nets[k]['model']=='bridge' or other_nets[k]['model']=='dataplane_net' or other_nets[k]['model']=='bridge_net':
#            for con in conections_list:
#                delete_indexes=[]
#                for index in range(0,len(con)):
#                    if con[index][0] == k: delete_indexes.insert(0,index) #order from higher to lower
#                for index in delete_indexes:
#                    del con[index]
#            del other_nets[k]
#1.7: Check external_ports are present at database table datacenter_nets
    for k,net in other_nets.items():
        error_pos = "'topology':'nodes':'" + k + "'"
        if net['external']==False:
            # internal net: derive its type from the declared model
            if 'name' not in net:
                net['name']=k
            if 'model' not in net:
                raise NfvoException("needed a 'model' at " + error_pos, httperrors.Bad_Request)
            if net['model']=='bridge_net':
                net['type']='bridge';
            elif net['model']=='dataplane_net':
                net['type']='data';
            else:
                raise NfvoException("unknown 'model' '"+ net['model'] +"' at " + error_pos, httperrors.Not_Found)
        else: #external
#IF we do not want to check that external network exist at datacenter
            pass
#ELSE
#             error_text = ""
#             WHERE_={}
#             if 'net_id' in net:
#                 error_text += " 'net_id' " +  net['net_id']
#                 WHERE_['uuid'] = net['net_id']
#             if 'model' in net:
#                 error_text += " 'model' " +  net['model']
#                 WHERE_['name'] = net['model']
#             if len(WHERE_) == 0:
#                 return -httperrors.Bad_Request, "needed a 'net_id' or 'model' at " + error_pos
#             r,net_db = mydb.get_table(SELECT=('uuid','name','description','type','shared'),
#                 FROM='datacenter_nets', WHERE=WHERE_ )
#             if r<0:
#                 print "nfvo.new_scenario Error getting datacenter_nets",r,net_db
#             elif r==0:
#                 print "nfvo.new_scenario Error" +error_text+ " is not present at database"
#                 return -httperrors.Bad_Request, "unknown " +error_text+ " at " + error_pos
#             elif r>1:
#                 print "nfvo.new_scenario Error more than one external_network for " +error_text+ " is present at database"
#                 return -httperrors.Bad_Request, "more than one external_network for " +error_text+ "at "+ error_pos + " Concrete with 'net_id'"
#             other_nets[k].update(net_db[0])
#ENDIF
    net_list={}
    net_nb=0  #Number of nets
    for con in conections_list:
        #check if this is connected to a external net
        other_net_index=-1
        #print
        #print "con", con
        for index in range(0,len(con)):
            #check if this is connected to a external net
            for net_key in other_nets.keys():
                if con[index][0]==net_key:
                    # an interface may belong to one declared net at most
                    if other_net_index>=0:
                        error_text = "There is some interface connected both to net '{}' and net '{}'".format(
                            con[other_net_index][0], net_key)
                        #print "nfvo.new_scenario " + error_text
                        raise NfvoException(error_text, httperrors.Bad_Request)
                    else:
                        other_net_index = index
                        net_target = net_key
                    break
        #print "other_net_index", other_net_index
        # NOTE(review): the broad except below maps ANY failure (including
        # KeyError from a malformed descriptor) to a Bad_Request — it can also
        # mask genuine programming errors in this branch.
        try:
            if other_net_index>=0:
                del con[other_net_index]
#IF we do not want to check that external network exist at datacenter
                if other_nets[net_target]['external'] :
                    if "name" not in other_nets[net_target]:
                        other_nets[net_target]['name'] =  other_nets[net_target]['model']
                    if other_nets[net_target]["type"] == "external_network":
                        if vnfs[ con[0][0] ]['ifaces'][ con[0][1] ]["type"] == "data":
                            other_nets[net_target]["type"] =  "data"
                        else:
                            other_nets[net_target]["type"] =  "bridge"
#ELSE
#                 if other_nets[net_target]['external'] :
#                     type_='data' if len(con)>1 else 'ptp'  #an external net is connected to a external port, so it is ptp if only one connection is done to this net
#                     if type_=='data' and other_nets[net_target]['type']=="ptp":
#                         error_text = "Error connecting %d nodes on a not multipoint net %s" % (len(con), net_target)
#                         print "nfvo.new_scenario " + error_text
#                         return -httperrors.Bad_Request, error_text
#ENDIF
                for iface in con:
                    vnfs[ iface[0] ]['ifaces'][ iface[1] ]['net_key'] = net_target
            else:
                #create a net
                net_type_bridge=False
                net_type_data=False
                net_target = "__-__net"+str(net_nb)
                net_list[net_target] = {'name': conections_list_name[net_nb],  #"net-"+str(net_nb),
                    'description':"net-{} in scenario {}".format(net_nb,topo['name']),
                    'external':False}
                for iface in con:
                    vnfs[ iface[0] ]['ifaces'][ iface[1] ]['net_key'] = net_target
                    iface_type = vnfs[ iface[0] ]['ifaces'][ iface[1] ]['type']
                    if iface_type=='mgmt' or iface_type=='bridge':
                        net_type_bridge = True
                    else:
                        net_type_data = True
                # bridge and data interfaces may not share a net
                if net_type_bridge and net_type_data:
                    error_text = "Error connection interfaces of bridge type with data type. Firs node {}, iface {}".format(iface[0], iface[1])
                    #print "nfvo.new_scenario " + error_text
                    raise NfvoException(error_text, httperrors.Bad_Request)
                elif net_type_bridge:
                    type_='bridge'
                else:
                    type_='data' if len(con)>2 else 'ptp'
                net_list[net_target]['type'] = type_
                net_nb+=1
        except Exception:
            error_text = "Error connection node {} : {} does not match any VNF or interface".format(iface[0], iface[1])
            #print "nfvo.new_scenario " + error_text
            #raise e
            raise NfvoException(error_text, httperrors.Bad_Request)

#1.8: Connect to management net all not already connected interfaces of type 'mgmt'
    #1.8.1 obtain management net
    mgmt_net = mydb.get_rows(SELECT=('uuid','name','description','type','shared'),
        FROM='datacenter_nets', WHERE={'name':'mgmt'} )
    #1.8.2 check all interfaces from all vnfs
    if len(mgmt_net)>0:
        add_mgmt_net = False
        for vnf in vnfs.values():
            for iface in vnf['ifaces'].values():
                if iface['type']=='mgmt' and 'net_key' not in iface:
                    #iface not connected
                    iface['net_key'] = 'mgmt'
                    add_mgmt_net = True
        if add_mgmt_net and 'mgmt' not in net_list:
            net_list['mgmt']=mgmt_net[0]
            net_list['mgmt']['external']=True
            net_list['mgmt']['graph']={'visible':False}

    net_list.update(other_nets)
    #print
    #print 'net_list', net_list
    #print
    #print 'vnfs', vnfs
    #print

#2: insert scenario. filling tables scenarios,sce_vnfs,sce_interfaces,sce_nets
    c = mydb.new_scenario( { 'vnfs':vnfs, 'nets':net_list,
        'tenant_id':tenant_id, 'name':topo['name'],
         'description':topo.get('description',topo['name']),
         'public': topo.get('public', False)
         })

    return c
+
+
@deprecated("Use new_nsd_v3")
def new_scenario_v02(mydb, tenant_id, scenario_dict, version):
    """Create a scenario from a v0.2/v0.3 descriptor and insert it at database.

    :param mydb: database connector object
    :param tenant_id: tenant owning the scenario, or "any" to skip the ownership check
    :param scenario_dict: descriptor dictionary; the payload is under key "scenario"
    :param version: descriptor version, "0.2" or "0.3"; they differ in the format of
        'networks':'interfaces' entries (v0.3 adds 'ip_address' and explicit vnf/vnf_interface keys)
    :return: uuid of the created scenario
    :raises NfvoException: on unknown/ambiguous VNF references, interface mismatches
        or inconsistent network types
    """
    scenario = scenario_dict["scenario"]
    if tenant_id != "any":
        check_tenant(mydb, tenant_id)
        if "tenant_id" in scenario:
            if scenario["tenant_id"] != tenant_id:
                # print "nfvo.new_scenario_v02() tenant '%s' not found" % tenant_id
                raise NfvoException("VNF can not have a different tenant owner '{}', must be '{}'".format(
                                                    scenario["tenant_id"], tenant_id), httperrors.Unauthorized)
    else:
        tenant_id=None

    # 1: Check that VNF are present at database table vnfs and update content into scenario dict
    for name,vnf in scenario["vnfs"].items():
        # a VNF is usable if owned by this tenant or flagged public
        where = {"OR": {"tenant_id": tenant_id, 'public': "true"}}
        error_text = ""
        error_pos = "'scenario':'vnfs':'" + name + "'"
        if 'vnf_id' in vnf:
            error_text += " 'vnf_id' " + vnf['vnf_id']
            where['uuid'] = vnf['vnf_id']
        if 'vnf_name' in vnf:
            error_text += " 'vnf_name' " + vnf['vnf_name']
            where['name'] = vnf['vnf_name']
        if len(where) == 1:
            # neither 'vnf_id' nor 'vnf_name' were provided; only the OR clause is present
            raise NfvoException("Needed a 'vnf_id' or 'vnf_name' at " + error_pos, httperrors.Bad_Request)
        vnf_db = mydb.get_rows(SELECT=('uuid', 'name', 'description'),
                               FROM='vnfs',
                               WHERE=where)
        if len(vnf_db) == 0:
            raise NfvoException("Unknown" + error_text + " at " + error_pos, httperrors.Not_Found)
        elif len(vnf_db) > 1:
            raise NfvoException("More than one" + error_text + " at " + error_pos + " Concrete with 'vnf_id'", httperrors.Conflict)
        vnf['uuid'] = vnf_db[0]['uuid']
        vnf['description'] = vnf_db[0]['description']
        vnf['ifaces'] = {}
        # get external interfaces
        ext_ifaces = mydb.get_rows(SELECT=('external_name as name', 'i.uuid as iface_uuid', 'i.type as type'),
                                   FROM='vnfs join vms on vnfs.uuid=vms.vnf_id join interfaces as i on vms.uuid=i.vm_id',
                                   WHERE={'vnfs.uuid':vnf['uuid'], 'external_name<>': None} )
        for ext_iface in ext_ifaces:
            vnf['ifaces'][ ext_iface['name'] ] = {'uuid':ext_iface['iface_uuid'], 'type': ext_iface['type']}
        # TODO? get internal-connections from db.nets and their profiles, and update scenario[vnfs][internal-connections] accordingly

    # 2: Insert net_key and ip_address at every vnf interface
    for net_name, net in scenario["networks"].items():
        net_type_bridge = False
        net_type_data = False
        for iface_dict in net["interfaces"]:
            # v0.2 lists interfaces as {vnf: iface} mappings; v0.3 uses explicit keys and
            # optionally carries an 'ip_address'
            if version == "0.2":
                temp_dict = iface_dict
                ip_address = None
            elif version == "0.3":
                temp_dict = {iface_dict["vnf"] : iface_dict["vnf_interface"]}
                ip_address = iface_dict.get('ip_address', None)
            for vnf, iface in temp_dict.items():
                if vnf not in scenario["vnfs"]:
                    error_text = "Error at 'networks':'{}':'interfaces' VNF '{}' not match any VNF at 'vnfs'".format(
                        net_name, vnf)
                    # logger.debug("nfvo.new_scenario_v02 " + error_text)
                    raise NfvoException(error_text, httperrors.Not_Found)
                if iface not in scenario["vnfs"][vnf]['ifaces']:
                    error_text = "Error at 'networks':'{}':'interfaces':'{}' interface not match any VNF interface"\
                        .format(net_name, iface)
                    # logger.debug("nfvo.new_scenario_v02 " + error_text)
                    raise NfvoException(error_text, httperrors.Bad_Request)
                if "net_key" in scenario["vnfs"][vnf]['ifaces'][iface]:
                    # an interface can be attached to one network only
                    error_text = "Error at 'networks':'{}':'interfaces':'{}' interface already connected at network "\
                                 "'{}'".format(net_name, iface,scenario["vnfs"][vnf]['ifaces'][iface]['net_key'])
                    # logger.debug("nfvo.new_scenario_v02 " + error_text)
                    raise NfvoException(error_text, httperrors.Bad_Request)
                scenario["vnfs"][vnf]['ifaces'][ iface ]['net_key'] = net_name
                scenario["vnfs"][vnf]['ifaces'][iface]['ip_address'] = ip_address
                iface_type = scenario["vnfs"][vnf]['ifaces'][iface]['type']
                if iface_type == 'mgmt' or iface_type == 'bridge':
                    net_type_bridge = True
                else:
                    net_type_data = True

        # derive the network type from the attached interface types
        if net_type_bridge and net_type_data:
            error_text = "Error connection interfaces of 'bridge' type and 'data' type at 'networks':'{}':'interfaces'"\
                .format(net_name)
            # logger.debug("nfvo.new_scenario " + error_text)
            raise NfvoException(error_text, httperrors.Bad_Request)
        elif net_type_bridge:
            type_ = 'bridge'
        else:
            # pure data nets with up to 2 endpoints are point-to-point
            type_ = 'data' if len(net["interfaces"]) > 2 else 'ptp'

        if net.get("implementation"):     # for v0.3
            if type_ == "bridge" and net["implementation"] == "underlay":
                error_text = "Error connecting interfaces of bridge type to a network declared as 'underlay' at "\
                             "'network':'{}'".format(net_name)
                # logger.debug(error_text)
                raise NfvoException(error_text, httperrors.Bad_Request)
            elif type_ != "bridge" and net["implementation"] == "overlay":
                error_text = "Error connecting interfaces of data type to a network declared as 'overlay' at "\
                             "'network':'{}'".format(net_name)
                # logger.debug(error_text)
                raise NfvoException(error_text, httperrors.Bad_Request)
            net.pop("implementation")
        if "type" in net and version == "0.3":   # for v0.3
            if type_ == "data" and net["type"] == "e-line":
                error_text = "Error connecting more than 2 interfaces of data type to a network declared as type "\
                             "'e-line' at 'network':'{}'".format(net_name)
                # logger.debug(error_text)
                raise NfvoException(error_text, httperrors.Bad_Request)
            elif type_ == "ptp" and net["type"] == "e-lan":
                # e-lan declared explicitly: promote a would-be ptp net to data
                type_ = "data"

        net['type'] = type_
        net['name'] = net_name
        net['external'] = net.get('external', False)

    # 3: insert at database
    scenario["nets"] = scenario["networks"]
    scenario['tenant_id'] = tenant_id
    scenario_id = mydb.new_scenario(scenario)
    return scenario_id
+
+
def new_nsd_v3(mydb, tenant_id, nsd_descriptor):
    """
    Parses an OSM IM nsd_catalog and insert at DB

    :param mydb: database connector object
    :param tenant_id: tenant that owns the NSD and whose VNFDs are referenced
    :param nsd_descriptor: plain dictionary following the OSM IM nsd:nsd-catalog schema
    :return: The list of created NSD ids (scenario uuids)
    :raises NfvoException: when the descriptor is invalid or references non-existing
        VNFDs, connection points, ip-profiles or rsps
    """
    try:
        mynsd = nsd_catalog.nsd()
        try:
            pybindJSONDecoder.load_ietf_json(nsd_descriptor, None, None, obj=mynsd, skip_unknown=True)
        except Exception as e:
            raise NfvoException("Error. Invalid NS descriptor format: " + str(e), httperrors.Bad_Request)
        # rows to be bulk-inserted, one list per database table
        db_scenarios = []
        db_sce_nets = []
        db_sce_vnfs = []
        db_sce_interfaces = []
        db_sce_vnffgs = []
        db_sce_rsps = []
        db_sce_rsp_hops = []
        db_sce_classifiers = []
        db_sce_classifier_matches = []
        db_ip_profiles = []
        db_ip_profiles_index = 0
        uuid_list = []          # every uuid generated here, registered at DB on new_rows
        nsd_uuid_list = []      # only the scenario (NSD) uuids, returned to the caller
        for nsd_yang in mynsd.nsd_catalog.nsd.values():
            nsd = nsd_yang.get()

            # table scenarios
            scenario_uuid = str(uuid4())
            uuid_list.append(scenario_uuid)
            nsd_uuid_list.append(scenario_uuid)
            db_scenario = {
                "uuid": scenario_uuid,
                "osm_id": get_str(nsd, "id", 255),
                "name": get_str(nsd, "name", 255),
                "description": get_str(nsd, "description", 255),
                "tenant_id": tenant_id,
                "vendor": get_str(nsd, "vendor", 255),
                "short_name": get_str(nsd, "short-name", 255),
                "descriptor": str(nsd_descriptor)[:60000],
            }
            db_scenarios.append(db_scenario)

            # table sce_vnfs (constituent-vnfd)
            vnf_index2scevnf_uuid = {}
            vnf_index2vnf_uuid = {}
            for vnf in nsd.get("constituent-vnfd").values():
                existing_vnf = mydb.get_rows(FROM="vnfs", WHERE={'osm_id': str(vnf["vnfd-id-ref"])[:255],
                                                                      'tenant_id': tenant_id})
                if not existing_vnf:
                    raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'constituent-vnfd':'vnfd-id-ref':"
                                        "'{}'. Reference to a non-existing VNFD in the catalog".format(
                                            str(nsd["id"]), str(vnf["vnfd-id-ref"])[:255]),
                                        httperrors.Bad_Request)
                sce_vnf_uuid = str(uuid4())
                uuid_list.append(sce_vnf_uuid)
                db_sce_vnf = {
                    "uuid": sce_vnf_uuid,
                    "scenario_id": scenario_uuid,
                    # "name": get_str(vnf, "member-vnf-index", 255),
                    "name": existing_vnf[0]["name"][:200] + "." + get_str(vnf, "member-vnf-index", 50),
                    "vnf_id": existing_vnf[0]["uuid"],
                    "member_vnf_index": str(vnf["member-vnf-index"]),
                    # TODO 'start-by-default': True
                }
                vnf_index2scevnf_uuid[str(vnf['member-vnf-index'])] = sce_vnf_uuid
                vnf_index2vnf_uuid[str(vnf['member-vnf-index'])] = existing_vnf[0]["uuid"]
                db_sce_vnfs.append(db_sce_vnf)

            # table ip_profiles (ip-profiles)
            ip_profile_name2db_table_index = {}
            for ip_profile in nsd.get("ip-profiles").values():
                db_ip_profile = {
                    "ip_version": str(ip_profile["ip-profile-params"].get("ip-version", "ipv4")),
                    "subnet_address": str(ip_profile["ip-profile-params"].get("subnet-address")),
                    "gateway_address": str(ip_profile["ip-profile-params"].get("gateway-address")),
                    "dhcp_enabled": str(ip_profile["ip-profile-params"]["dhcp-params"].get("enabled", True)),
                    "dhcp_start_address": str(ip_profile["ip-profile-params"]["dhcp-params"].get("start-address")),
                    "dhcp_count": str(ip_profile["ip-profile-params"]["dhcp-params"].get("count")),
                }
                dns_list = []
                for dns in ip_profile["ip-profile-params"]["dns-server"].values():
                    dns_list.append(str(dns.get("address")))
                db_ip_profile["dns_address"] = ";".join(dns_list)
                if ip_profile["ip-profile-params"].get('security-group'):
                    db_ip_profile["security_group"] = ip_profile["ip-profile-params"]['security-group']
                # remember the row index so a vld 'ip-profile-ref' can link back to it
                ip_profile_name2db_table_index[str(ip_profile["name"])] = db_ip_profiles_index
                db_ip_profiles_index += 1
                db_ip_profiles.append(db_ip_profile)

            # table sce_nets (internal-vld)
            for vld in nsd.get("vld").values():
                sce_net_uuid = str(uuid4())
                uuid_list.append(sce_net_uuid)
                db_sce_net = {
                    "uuid": sce_net_uuid,
                    "name": get_str(vld, "name", 255),
                    "scenario_id": scenario_uuid,
                    # "type": #TODO
                    "multipoint": not vld.get("type") == "ELINE",
                    "osm_id":  get_str(vld, "id", 255),
                    # "external": #TODO
                    "description": get_str(vld, "description", 255),
                }
                # guess type of network
                if vld.get("mgmt-network"):
                    db_sce_net["type"] = "bridge"
                    db_sce_net["external"] = True
                elif vld.get("provider-network").get("overlay-type") == "VLAN":
                    db_sce_net["type"] = "data"
                else:
                    # later on it will be fixed to bridge or data depending on the type of interfaces attached to it
                    db_sce_net["type"] = None
                db_sce_nets.append(db_sce_net)

                # ip-profile, link db_ip_profile with db_sce_net
                if vld.get("ip-profile-ref"):
                    ip_profile_name = vld.get("ip-profile-ref")
                    if ip_profile_name not in ip_profile_name2db_table_index:
                        raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'vld[{}]':'ip-profile-ref':'{}'."
                                            " Reference to a non-existing 'ip_profiles'".format(
                                                str(nsd["id"]), str(vld["id"]), str(vld["ip-profile-ref"])),
                                            httperrors.Bad_Request)
                    db_ip_profiles[ip_profile_name2db_table_index[ip_profile_name]]["sce_net_id"] = sce_net_uuid
                elif vld.get("vim-network-name"):
                    db_sce_net["vim_network_name"] = get_str(vld, "vim-network-name", 255)

                # table sce_interfaces (vld:vnfd-connection-point-ref)
                for iface in vld.get("vnfd-connection-point-ref").values():
                    vnf_index = str(iface['member-vnf-index-ref'])
                    # check correct parameters
                    if vnf_index not in vnf_index2vnf_uuid:
                        raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'vld[{}]':'vnfd-connection-point"
                                            "-ref':'member-vnf-index-ref':'{}'. Reference to a non-existing index at "
                                            "'nsd':'constituent-vnfd'".format(
                                                str(nsd["id"]), str(vld["id"]), str(iface["member-vnf-index-ref"])),
                                            httperrors.Bad_Request)

                    existing_ifaces = mydb.get_rows(SELECT=('i.uuid as uuid', 'i.type as iface_type'),
                                                    FROM="interfaces as i join vms on i.vm_id=vms.uuid",
                                                    WHERE={'vnf_id': vnf_index2vnf_uuid[vnf_index],
                                                           'external_name': get_str(iface, "vnfd-connection-point-ref",
                                                                                    255)})
                    if not existing_ifaces:
                        raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'vld[{}]':'vnfd-connection-point"
                                            "-ref':'vnfd-connection-point-ref':'{}'. Reference to a non-existing "
                                            "connection-point name at VNFD '{}'".format(
                                                str(nsd["id"]), str(vld["id"]), str(iface["vnfd-connection-point-ref"]),
                                                str(iface.get("vnfd-id-ref"))[:255]),
                                            httperrors.Bad_Request)
                    interface_uuid = existing_ifaces[0]["uuid"]
                    if existing_ifaces[0]["iface_type"] == "data":
                        db_sce_net["type"] = "data"
                    sce_interface_uuid = str(uuid4())
                    # fix: register the interface uuid (sce_net_uuid was wrongly appended twice before)
                    uuid_list.append(sce_interface_uuid)
                    iface_ip_address = None
                    if iface.get("ip-address"):
                        iface_ip_address = str(iface.get("ip-address"))
                    db_sce_interface = {
                        "uuid": sce_interface_uuid,
                        "sce_vnf_id": vnf_index2scevnf_uuid[vnf_index],
                        "sce_net_id": sce_net_uuid,
                        "interface_id": interface_uuid,
                        "ip_address": iface_ip_address,
                    }
                    db_sce_interfaces.append(db_sce_interface)
                if not db_sce_net["type"]:
                    db_sce_net["type"] = "bridge"

            # table sce_vnffgs (vnffgd)
            for vnffg in nsd.get("vnffgd").values():
                sce_vnffg_uuid = str(uuid4())
                uuid_list.append(sce_vnffg_uuid)
                db_sce_vnffg = {
                    "uuid": sce_vnffg_uuid,
                    "name": get_str(vnffg, "name", 255),
                    "scenario_id": scenario_uuid,
                    "vendor": get_str(vnffg, "vendor", 255),
                    "description": get_str(vld, "description", 255),
                }
                db_sce_vnffgs.append(db_sce_vnffg)

                # deal with rsps
                for rsp in vnffg.get("rsp").values():
                    sce_rsp_uuid = str(uuid4())
                    uuid_list.append(sce_rsp_uuid)
                    db_sce_rsp = {
                        "uuid": sce_rsp_uuid,
                        "name": get_str(rsp, "name", 255),
                        "sce_vnffg_id": sce_vnffg_uuid,
                        "id": get_str(rsp, "id", 255), # only useful to link with classifiers; will be removed later in the code
                    }
                    db_sce_rsps.append(db_sce_rsp)
                    for iface in rsp.get("vnfd-connection-point-ref").values():
                        vnf_index = str(iface['member-vnf-index-ref'])
                        if_order = int(iface['order'])
                        # check correct parameters
                        if vnf_index not in vnf_index2vnf_uuid:
                            raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'rsp[{}]':'vnfd-connection-point"
                                                "-ref':'member-vnf-index-ref':'{}'. Reference to a non-existing index at "
                                                "'nsd':'constituent-vnfd'".format(
                                                    str(nsd["id"]), str(rsp["id"]), str(iface["member-vnf-index-ref"])),
                                                httperrors.Bad_Request)

                        ingress_existing_ifaces = mydb.get_rows(SELECT=('i.uuid as uuid',),
                                                                FROM="interfaces as i join vms on i.vm_id=vms.uuid",
                                                                WHERE={
                                                                    'vnf_id': vnf_index2vnf_uuid[vnf_index],
                                                                    'external_name': get_str(iface, "vnfd-ingress-connection-point-ref",
                                                                                             255)})
                        if not ingress_existing_ifaces:
                            raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'rsp[{}]':'vnfd-connection-point"
                                                "-ref':'vnfd-ingress-connection-point-ref':'{}'. Reference to a non-existing "
                                                "connection-point name at VNFD '{}'".format(
                                str(nsd["id"]), str(rsp["id"]), str(iface["vnfd-ingress-connection-point-ref"]),
                                str(iface.get("vnfd-id-ref"))[:255]), httperrors.Bad_Request)

                        egress_existing_ifaces = mydb.get_rows(SELECT=('i.uuid as uuid',),
                                                               FROM="interfaces as i join vms on i.vm_id=vms.uuid",
                                                               WHERE={
                                                                   'vnf_id': vnf_index2vnf_uuid[vnf_index],
                                                                   'external_name': get_str(iface, "vnfd-egress-connection-point-ref",
                                                                                            255)})
                        if not egress_existing_ifaces:
                            # fix: use httperrors.Bad_Request; 'HTTP_Bad_Request' is not defined in this module
                            raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'rsp[{}]':'vnfd-connection-point"
                                                "-ref':'vnfd-egress-connection-point-ref':'{}'. Reference to a non-existing "
                                                "connection-point name at VNFD '{}'".format(
                                str(nsd["id"]), str(rsp["id"]), str(iface["vnfd-egress-connection-point-ref"]),
                                str(iface.get("vnfd-id-ref"))[:255]), httperrors.Bad_Request)

                        ingress_interface_uuid = ingress_existing_ifaces[0]["uuid"]
                        egress_interface_uuid = egress_existing_ifaces[0]["uuid"]
                        sce_rsp_hop_uuid = str(uuid4())
                        uuid_list.append(sce_rsp_hop_uuid)
                        db_sce_rsp_hop = {
                            "uuid": sce_rsp_hop_uuid,
                            "if_order": if_order,
                            "ingress_interface_id": ingress_interface_uuid,
                            "egress_interface_id": egress_interface_uuid,
                            "sce_vnf_id": vnf_index2scevnf_uuid[vnf_index],
                            "sce_rsp_id": sce_rsp_uuid,
                        }
                        db_sce_rsp_hops.append(db_sce_rsp_hop)

                # deal with classifiers
                for classifier in vnffg.get("classifier").values():
                    sce_classifier_uuid = str(uuid4())
                    uuid_list.append(sce_classifier_uuid)

                    # source VNF
                    vnf_index = str(classifier['member-vnf-index-ref'])
                    if vnf_index not in vnf_index2vnf_uuid:
                        raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'classifier[{}]':'vnfd-connection-point"
                                            "-ref':'member-vnf-index-ref':'{}'. Reference to a non-existing index at "
                                            "'nsd':'constituent-vnfd'".format(
                                                str(nsd["id"]), str(classifier["id"]), str(classifier["member-vnf-index-ref"])),
                                            httperrors.Bad_Request)
                    existing_ifaces = mydb.get_rows(SELECT=('i.uuid as uuid',),
                                                    FROM="interfaces as i join vms on i.vm_id=vms.uuid",
                                                    WHERE={'vnf_id': vnf_index2vnf_uuid[vnf_index],
                                                           'external_name': get_str(classifier, "vnfd-connection-point-ref",
                                                                                    255)})
                    if not existing_ifaces:
                        # fix: report the classifier's own fields; the old message reused the
                        # 'rsp'/'iface' variables left over from the previous loop
                        raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'classifier[{}]':'vnfd-connection-point"
                                            "-ref':'vnfd-connection-point-ref':'{}'. Reference to a non-existing "
                                            "connection-point name at VNFD '{}'".format(
                                                str(nsd["id"]), str(classifier["id"]),
                                                str(classifier["vnfd-connection-point-ref"]),
                                                str(classifier.get("vnfd-id-ref"))[:255]),
                                            httperrors.Bad_Request)
                    interface_uuid = existing_ifaces[0]["uuid"]

                    db_sce_classifier = {
                        "uuid": sce_classifier_uuid,
                        "name": get_str(classifier, "name", 255),
                        "sce_vnffg_id": sce_vnffg_uuid,
                        "sce_vnf_id": vnf_index2scevnf_uuid[vnf_index],
                        "interface_id": interface_uuid,
                    }
                    rsp_id = get_str(classifier, "rsp-id-ref", 255)
                    rsp = next((item for item in db_sce_rsps if item["id"] == rsp_id), None)
                    if rsp is None:
                        # fix: raise a descriptive error instead of crashing with TypeError below
                        raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'classifier[{}]':'rsp-id-ref':"
                                            "'{}'. Reference to a non-existing 'rsp'".format(
                                                str(nsd["id"]), str(classifier["id"]), rsp_id),
                                            httperrors.Bad_Request)
                    db_sce_classifier["sce_rsp_id"] = rsp["uuid"]
                    db_sce_classifiers.append(db_sce_classifier)

                    for match in classifier.get("match-attributes").values():
                        sce_classifier_match_uuid = str(uuid4())
                        uuid_list.append(sce_classifier_match_uuid)
                        db_sce_classifier_match = {
                            "uuid": sce_classifier_match_uuid,
                            "ip_proto": get_str(match, "ip-proto", 2),
                            "source_ip": get_str(match, "source-ip-address", 16),
                            "destination_ip": get_str(match, "destination-ip-address", 16),
                            "source_port": get_str(match, "source-port", 5),
                            "destination_port": get_str(match, "destination-port", 5),
                            "sce_classifier_id": sce_classifier_uuid,
                        }
                        db_sce_classifier_matches.append(db_sce_classifier_match)
                    # TODO: vnf/cp keys

        # remove unneeded id's in sce_rsps
        for rsp in db_sce_rsps:
            rsp.pop('id')

        db_tables = [
            {"scenarios": db_scenarios},
            {"sce_nets": db_sce_nets},
            {"ip_profiles": db_ip_profiles},
            {"sce_vnfs": db_sce_vnfs},
            {"sce_interfaces": db_sce_interfaces},
            {"sce_vnffgs": db_sce_vnffgs},
            {"sce_rsps": db_sce_rsps},
            {"sce_rsp_hops": db_sce_rsp_hops},
            {"sce_classifiers": db_sce_classifiers},
            {"sce_classifier_matches": db_sce_classifier_matches},
        ]

        logger.debug("new_nsd_v3 done: %s",
                    yaml.safe_dump(db_tables, indent=4, default_flow_style=False) )
        mydb.new_rows(db_tables, uuid_list)
        return nsd_uuid_list
    except NfvoException:
        raise
    except Exception as e:
        logger.error("Exception {}".format(e))
        raise  # NfvoException("Exception {}".format(e), httperrors.Bad_Request)
+
+
def edit_scenario(mydb, tenant_id, scenario_id, data):
    """Update an existing scenario at the database.

    Tags *data* in place with the scenario uuid and the owning tenant, then
    delegates the actual update to the database layer and returns its result.
    """
    data["uuid"], data["tenant_id"] = scenario_id, tenant_id
    return mydb.edit_scenario(data)
+
+
+@deprecated("Use create_instance")
+def start_scenario(mydb, tenant_id, scenario_id, instance_scenario_name, instance_scenario_description, datacenter=None,vim_tenant=None, startvms=True):
+    #print "Checking that nfvo_tenant_id exists and getting the VIM URI and the VIM tenant_id"
+    datacenter_id, myvim = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter, vim_tenant=vim_tenant)
+    vims = {datacenter_id: myvim}
+    myvim_tenant = myvim['tenant_id']
+    datacenter_name = myvim['name']
+
+    rollbackList=[]
+    try:
+        #print "Checking that the scenario_id exists and getting the scenario dictionary"
+        scenarioDict = mydb.get_scenario(scenario_id, tenant_id, datacenter_id=datacenter_id)
+        scenarioDict['datacenter2tenant'] = { datacenter_id: myvim['config']['datacenter_tenant_id'] }
+        scenarioDict['datacenter_id'] = datacenter_id
+        #print '================scenarioDict======================='
+        #print json.dumps(scenarioDict, indent=4)
+        #print 'BEGIN launching instance scenario "%s" based on "%s"' % (instance_scenario_name,scenarioDict['name'])
+
+        logger.debug("start_scenario Scenario %s: consisting of %d VNF(s)", scenarioDict['name'],len(scenarioDict['vnfs']))
+        #print yaml.safe_dump(scenarioDict, indent=4, default_flow_style=False)
+
+        auxNetDict = {}   #Auxiliar dictionary. First key:'scenario' or sce_vnf uuid. Second Key: uuid of the net/sce_net. Value: vim_net_id
+        auxNetDict['scenario'] = {}
+
+        logger.debug("start_scenario 1. Creating new nets (sce_nets) in the VIM")
+        for sce_net in scenarioDict['nets']:
+            #print "Net name: %s. Description: %s" % (sce_net["name"], sce_net["description"])
+
+            myNetName = "{}.{}".format(instance_scenario_name, sce_net['name'])
+            myNetName = myNetName[0:255] #limit length
+            myNetType = sce_net['type']
+            myNetDict = {}
+            myNetDict["name"] = myNetName
+            myNetDict["type"] = myNetType
+            myNetDict["tenant_id"] = myvim_tenant
+            myNetIPProfile = sce_net.get('ip_profile', None)
+            #TODO:
+            #We should use the dictionary as input parameter for new_network
+            #print myNetDict
+            if not sce_net["external"]:
+                network_id, _ = myvim.new_network(myNetName, myNetType, myNetIPProfile)
+                #print "New VIM network created for scenario %s. Network id:  %s" % (scenarioDict['name'],network_id)
+                sce_net['vim_id'] = network_id
+                auxNetDict['scenario'][sce_net['uuid']] = network_id
+                rollbackList.append({'what':'network','where':'vim','vim_id':datacenter_id,'uuid':network_id})
+                sce_net["created"] = True
+            else:
+                if sce_net['vim_id'] == None:
+                    error_text = "Error, datacenter '{}' does not have external network '{}'.".format(
+                        datacenter_name, sce_net['name'])
+                    _, message = rollback(mydb, vims, rollbackList)
+                    logger.error("nfvo.start_scenario: %s", error_text)
+                    raise NfvoException(error_text, httperrors.Bad_Request)
+                logger.debug("Using existent VIM network for scenario %s. Network id %s", scenarioDict['name'],sce_net['vim_id'])
+                auxNetDict['scenario'][sce_net['uuid']] = sce_net['vim_id']
+
+        logger.debug("start_scenario 2. Creating new nets (vnf internal nets) in the VIM")
+        #For each vnf net, we create it and we add it to instanceNetlist.
+
+        for sce_vnf in scenarioDict['vnfs']:
+            for net in sce_vnf['nets']:
+                #print "Net name: %s. Description: %s" % (net["name"], net["description"])
+
+                myNetName = "{}.{}".format(instance_scenario_name,net['name'])
+                myNetName = myNetName[0:255] #limit length
+                myNetType = net['type']
+                myNetDict = {}
+                myNetDict["name"] = myNetName
+                myNetDict["type"] = myNetType
+                myNetDict["tenant_id"] = myvim_tenant
+                myNetIPProfile = net.get('ip_profile', None)
+                #print myNetDict
+                #TODO:
+                #We should use the dictionary as input parameter for new_network
+                network_id, _  = myvim.new_network(myNetName, myNetType, myNetIPProfile)
+                #print "VIM network id for scenario %s: %s" % (scenarioDict['name'],network_id)
+                net['vim_id'] = network_id
+                if sce_vnf['uuid'] not in auxNetDict:
+                    auxNetDict[sce_vnf['uuid']] = {}
+                auxNetDict[sce_vnf['uuid']][net['uuid']] = network_id
+                rollbackList.append({'what':'network','where':'vim','vim_id':datacenter_id,'uuid':network_id})
+                net["created"] = True
+
+        #print "auxNetDict:"
+        #print yaml.safe_dump(auxNetDict, indent=4, default_flow_style=False)
+
+        logger.debug("start_scenario 3. Creating new vm instances in the VIM")
+        #myvim.new_vminstance(self,vimURI,tenant_id,name,description,image_id,flavor_id,net_dict)
+        i = 0
+        for sce_vnf in scenarioDict['vnfs']:
+            vnf_availability_zones = []
+            for vm in sce_vnf['vms']:
+                vm_av = vm.get('availability_zone')
+                if vm_av and vm_av not in vnf_availability_zones:
+                    vnf_availability_zones.append(vm_av)
+
+            # check if there is enough availability zones available at vim level.
+            if myvims[datacenter_id].availability_zone and vnf_availability_zones:
+                if len(vnf_availability_zones) > len(myvims[datacenter_id].availability_zone):
+                    raise NfvoException('No enough availability zones at VIM for this deployment', httperrors.Bad_Request)
+
+            for vm in sce_vnf['vms']:
+                i += 1
+                myVMDict = {}
+                #myVMDict['name'] = "%s-%s-%s" % (scenarioDict['name'],sce_vnf['name'], vm['name'])
+                myVMDict['name'] = "{}.{}.{}".format(instance_scenario_name,sce_vnf['name'],chr(96+i))
+                #myVMDict['description'] = vm['description']
+                myVMDict['description'] = myVMDict['name'][0:99]
+                if not startvms:
+                    myVMDict['start'] = "no"
+                myVMDict['name'] = myVMDict['name'][0:255] #limit name length
+                #print "VM name: %s. Description: %s" % (myVMDict['name'], myVMDict['name'])
+
+                #create image at vim in case it not exist
+                image_dict = mydb.get_table_by_uuid_name("images", vm['image_id'])
+                image_id = create_or_use_image(mydb, vims, image_dict, [], True)
+                vm['vim_image_id'] = image_id
+
+                #create flavor at vim in case it not exist
+                flavor_dict = mydb.get_table_by_uuid_name("flavors", vm['flavor_id'])
+                if flavor_dict['extended']!=None:
+                    flavor_dict['extended']= yaml.load(flavor_dict['extended'], Loader=yaml.Loader)
+                flavor_id = create_or_use_flavor(mydb, vims, flavor_dict, [], True)
+                vm['vim_flavor_id'] = flavor_id
+
+
+                myVMDict['imageRef'] = vm['vim_image_id']
+                myVMDict['flavorRef'] = vm['vim_flavor_id']
+                myVMDict['networks'] = []
+                for iface in vm['interfaces']:
+                    netDict = {}
+                    if iface['type']=="data":
+                        netDict['type'] = iface['model']
+                    elif "model" in iface and iface["model"]!=None:
+                        netDict['model']=iface['model']
+                    #TODO in future, remove this because mac_address will not be set, and the type of PV,VF is obtained from iterface table model
+                    #discover type of interface looking at flavor
+                    for numa in flavor_dict.get('extended',{}).get('numas',[]):
+                        for flavor_iface in numa.get('interfaces',[]):
+                            if flavor_iface.get('name') == iface['internal_name']:
+                                if flavor_iface['dedicated'] == 'yes':
+                                    netDict['type']="PF"    #passthrough
+                                elif flavor_iface['dedicated'] == 'no':
+                                    netDict['type']="VF"    #siov
+                                elif flavor_iface['dedicated'] == 'yes:sriov':
+                                    netDict['type']="VFnotShared"   #sriov but only one sriov on the PF
+                                netDict["mac_address"] = flavor_iface.get("mac_address")
+                                break;
+                    netDict["use"]=iface['type']
+                    if netDict["use"]=="data" and not netDict.get("type"):
+                        #print "netDict", netDict
+                        #print "iface", iface
+                        e_text = "Cannot determine the interface type PF or VF of VNF '{}' VM '{}' iface '{}'".format(
+                            sce_vnf['name'], vm['name'], iface['internal_name'])
+                        if flavor_dict.get('extended')==None:
+                            raise NfvoException(e_text  + "After database migration some information is not available. \
+                                    Try to delete and create the scenarios and VNFs again", httperrors.Conflict)
+                        else:
+                            raise NfvoException(e_text, httperrors.Internal_Server_Error)
+                    if netDict["use"]=="mgmt" or netDict["use"]=="bridge":
+                        netDict["type"]="virtual"
+                    if "vpci" in iface and iface["vpci"] is not None:
+                        netDict['vpci'] = iface['vpci']
+                    if "mac" in iface and iface["mac"] is not None:
+                        netDict['mac_address'] = iface['mac']
+                    if "port-security" in iface and iface["port-security"] is not None:
+                        netDict['port_security'] = iface['port-security']
+                    if "floating-ip" in iface and iface["floating-ip"] is not None:
+                        netDict['floating_ip'] = iface['floating-ip']
+                    netDict['name'] = iface['internal_name']
+                    if iface['net_id'] is None:
+                        for vnf_iface in sce_vnf["interfaces"]:
+                            #print iface
+                            #print vnf_iface
+                            if vnf_iface['interface_id']==iface['uuid']:
+                                netDict['net_id'] = auxNetDict['scenario'][ vnf_iface['sce_net_id'] ]
+                                break
+                    else:
+                        netDict['net_id'] = auxNetDict[ sce_vnf['uuid'] ][ iface['net_id'] ]
+                    #skip bridge ifaces not connected to any net
+                    #if 'net_id' not in netDict or netDict['net_id']==None:
+                    #    continue
+                    myVMDict['networks'].append(netDict)
+                #print ">>>>>>>>>>>>>>>>>>>>>>>>>>>"
+                #print myVMDict['name']
+                #print "networks", yaml.safe_dump(myVMDict['networks'], indent=4, default_flow_style=False)
+                #print "interfaces", yaml.safe_dump(vm['interfaces'], indent=4, default_flow_style=False)
+                #print ">>>>>>>>>>>>>>>>>>>>>>>>>>>"
+
+                if 'availability_zone' in myVMDict:
+                    av_index = vnf_availability_zones.index(myVMDict['availability_zone'])
+                else:
+                    av_index = None
+
+                vm_id, _ = myvim.new_vminstance(myVMDict['name'], myVMDict['description'], myVMDict.get('start', None),
+                                             myVMDict['imageRef'], myVMDict['flavorRef'], myVMDict['networks'],
+                                             availability_zone_index=av_index,
+                                             availability_zone_list=vnf_availability_zones)
+                #print "VIM vm instance id (server id) for scenario %s: %s" % (scenarioDict['name'],vm_id)
+                vm['vim_id'] = vm_id
+                rollbackList.append({'what':'vm','where':'vim','vim_id':datacenter_id,'uuid':vm_id})
+                #put interface uuid back to scenario[vnfs][vms[[interfaces]
+                for net in myVMDict['networks']:
+                    if "vim_id" in net:
+                        for iface in vm['interfaces']:
+                            if net["name"]==iface["internal_name"]:
+                                iface["vim_id"]=net["vim_id"]
+                                break
+
+        logger.debug("start scenario Deployment done")
+        #print yaml.safe_dump(scenarioDict, indent=4, default_flow_style=False)
+        #r,c = mydb.new_instance_scenario_as_a_whole(nfvo_tenant,scenarioDict['name'],scenarioDict)
+        instance_id = mydb.new_instance_scenario_as_a_whole(tenant_id,instance_scenario_name, instance_scenario_description, scenarioDict)
+        return mydb.get_instance_scenario(instance_id)
+
+    except (db_base_Exception, vimconn.vimconnException) as e:
+        _, message = rollback(mydb, vims, rollbackList)
+        if isinstance(e, db_base_Exception):
+            error_text = "Exception at database"
+        else:
+            error_text = "Exception at VIM"
+        error_text += " {} {}. {}".format(type(e).__name__, str(e), message)
+        #logger.error("start_scenario %s", error_text)
+        raise NfvoException(error_text, e.http_code)
+
def unify_cloud_config(cloud_config_preserve, cloud_config):
    """Join two cloud-config dictionaries into a new one.

    In case of conflict "cloud_config_preserve" takes precedence over
    "cloud_config". Either (or both) arguments can be None.
    Unlike earlier versions, the input dictionaries are never mutated.

    :param cloud_config_preserve: dict with optional keys "key-pairs", "users",
        "boot-data-drive", "user-data", "config-files"; or None
    :param cloud_config: same format as cloud_config_preserve; or None
    :return: merged dict (sections that end up empty are removed), or None when
        both inputs are empty/None
    """
    if not cloud_config_preserve and not cloud_config:
        return None

    new_cloud_config = {"key-pairs": [], "users": []}

    # key-pairs: union of both lists, preserving first-seen order
    for config in (cloud_config_preserve, cloud_config):
        if config:
            for key in config.get("key-pairs", ()):
                if key not in new_cloud_config["key-pairs"]:
                    new_cloud_config["key-pairs"].append(key)
    if not new_cloud_config["key-pairs"]:
        del new_cloud_config["key-pairs"]

    # users: keep the first occurrence of each user name (cloud_config entries
    # come first) and merge the "key-pairs" of later duplicates into it.
    # Entries are shallow-copied (including the key-pairs list) so the input
    # dictionaries are not modified as a side effect.
    candidate_users = []
    if cloud_config:
        candidate_users += cloud_config.get("users", ())
    if cloud_config_preserve:
        candidate_users += cloud_config_preserve.get("users", ())
    users_by_name = {}  # insertion-ordered (py3.7+): name -> merged user dict
    for user in candidate_users:
        merged_user = users_by_name.get(user["name"])
        if merged_user is None:
            merged_user = dict(user)
            if "key-pairs" in merged_user:
                merged_user["key-pairs"] = list(merged_user["key-pairs"])
            users_by_name[user["name"]] = merged_user
        else:
            for key in user.get("key-pairs", ()):
                if "key-pairs" not in merged_user:
                    merged_user["key-pairs"] = [key]
                elif key not in merged_user["key-pairs"]:
                    merged_user["key-pairs"].append(key)
    new_cloud_config["users"] = list(users_by_name.values())
    if not new_cloud_config["users"]:
        del new_cloud_config["users"]

    # boot-data-drive: cloud_config_preserve wins when both define it
    if cloud_config and cloud_config.get("boot-data-drive") is not None:
        new_cloud_config["boot-data-drive"] = cloud_config["boot-data-drive"]
    if cloud_config_preserve and cloud_config_preserve.get("boot-data-drive") is not None:
        new_cloud_config["boot-data-drive"] = cloud_config_preserve["boot-data-drive"]

    # user-data: concatenation, cloud_config first; a scalar becomes one item
    new_cloud_config["user-data"] = []
    for config in (cloud_config, cloud_config_preserve):
        if config and config.get("user-data"):
            if isinstance(config["user-data"], list):
                new_cloud_config["user-data"] += config["user-data"]
            else:
                new_cloud_config["user-data"].append(config["user-data"])
    if not new_cloud_config["user-data"]:
        del new_cloud_config["user-data"]

    # config-files: start from cloud_config; files from cloud_config_preserve
    # replace entries with the same "dest", otherwise they are appended
    new_cloud_config["config-files"] = []
    if cloud_config and cloud_config.get("config-files") is not None:
        new_cloud_config["config-files"] += cloud_config["config-files"]
    if cloud_config_preserve:
        for file in cloud_config_preserve.get("config-files", ()):
            for index in range(len(new_cloud_config["config-files"])):
                if new_cloud_config["config-files"][index]["dest"] == file["dest"]:
                    new_cloud_config["config-files"][index] = file
                    break
            else:
                new_cloud_config["config-files"].append(file)
    if not new_cloud_config["config-files"]:
        del new_cloud_config["config-files"]
    return new_cloud_config
+
+
def get_vim_thread(mydb, tenant_id, datacenter_id_name=None, datacenter_tenant_id=None):
    """Look up the running vim thread in charge of a datacenter.

    :param mydb: database connection object
    :param tenant_id: nfvo tenant uuid (filters the tenants_datacenters join)
    :param datacenter_id_name: datacenter uuid or name; used only when
        datacenter_tenant_id is not supplied
    :param datacenter_tenant_id: uuid of the datacenter_tenants entry; threads
        are indexed by this uuid, so when provided no database query is needed
    :return: tuple (thread_id, thread)
    :raises NfvoException: when the datacenter/thread is not found, when the
        filter matches several datacenters, or on database errors
    """
    datacenter_id = None
    datacenter_name = None
    thread = None
    try:
        if datacenter_tenant_id:
            thread_id = datacenter_tenant_id
            thread = vim_threads["running"].get(datacenter_tenant_id)
        else:
            where_ = {"td.nfvo_tenant_id": tenant_id}
            if datacenter_id_name:
                if utils.check_valid_uuid(datacenter_id_name):
                    datacenter_id = datacenter_id_name
                    where_["dt.datacenter_id"] = datacenter_id
                else:
                    datacenter_name = datacenter_id_name
                    where_["d.name"] = datacenter_name
            # NOTE(review): a former "dt.uuid" filter on datacenter_tenant_id was
            # removed here; it was dead code because this branch only runs when
            # datacenter_tenant_id is falsy
            datacenters = mydb.get_rows(
                SELECT=("dt.uuid as datacenter_tenant_id",),
                FROM="datacenter_tenants as dt join tenants_datacenters as td on dt.uuid=td.datacenter_tenant_id "
                     "join datacenters as d on d.uuid=dt.datacenter_id",
                WHERE=where_)
            if len(datacenters) > 1:
                raise NfvoException("More than one datacenters found, try to identify with uuid", httperrors.Conflict)
            elif datacenters:
                thread_id = datacenters[0]["datacenter_tenant_id"]
                thread = vim_threads["running"].get(thread_id)
        if not thread:
            raise NfvoException("datacenter '{}' not found".format(str(datacenter_id_name)), httperrors.Not_Found)
        return thread_id, thread
    except db_base_Exception as e:
        raise NfvoException("{} {}".format(type(e).__name__, str(e)), e.http_code) from e
+
+
def get_datacenter_uuid(mydb, tenant_id, datacenter_id_name):
    """Resolve a datacenter given by uuid or by name.

    When tenant_id is provided, only datacenters attached to that tenant are
    considered; otherwise the whole datacenters table is searched.

    :return: tuple (uuid, name) of the single matching datacenter
    :raises NfvoException: when no datacenter matches or several match
    """
    where_ = {}
    if utils.check_valid_uuid(datacenter_id_name):
        where_['d.uuid'] = datacenter_id_name
    else:
        where_['d.name'] = datacenter_id_name

    if tenant_id:
        where_['nfvo_tenant_id'] = tenant_id
        from_clause = "tenants_datacenters as td join datacenters as d on td.datacenter_id=d.uuid join datacenter_tenants as" \
                      " dt on td.datacenter_tenant_id=dt.uuid"
    else:
        from_clause = 'datacenters as d'
    matches = mydb.get_rows(FROM=from_clause, SELECT=("d.uuid as uuid, d.name as name",), WHERE=where_)
    if not matches:
        raise NfvoException("datacenter '{}' not found".format(str(datacenter_id_name)), httperrors.Not_Found)
    if len(matches) > 1:
        raise NfvoException("More than one datacenters found, try to identify with uuid", httperrors.Conflict)
    return matches[0]["uuid"], matches[0]["name"]
+
+
def get_datacenter_by_name_uuid(mydb, tenant_id, datacenter_id_name=None, **extra_filter):
    """Return the (vim_id, vim_content) pair of the single matching datacenter.

    datacenter_id_name may be either a uuid or a name; extra_filter is passed
    through to get_vim().

    :raises NfvoException: when no datacenter matches or several match
    """
    datacenter_id = None
    datacenter_name = None
    if datacenter_id_name:
        if utils.check_valid_uuid(datacenter_id_name):
            datacenter_id = datacenter_id_name
        else:
            datacenter_name = datacenter_id_name
    vims = get_vim(mydb, tenant_id, datacenter_id, datacenter_name, **extra_filter)
    if not vims:
        raise NfvoException("datacenter '{}' not found".format(str(datacenter_id_name)), httperrors.Not_Found)
    if len(vims) > 1:
        raise NfvoException("More than one datacenters found, try to identify with uuid", httperrors.Conflict)
    # exactly one entry at this point; return it
    return next(iter(vims.items()))
+
+
def update(d, u):
    """Recursively merge dict "u" into dict "d" and return "d".

    Values from "u" override those in "d", except that nested mappings are
    merged key by key at every depth level (d is modified in place).
    """
    # collections.Mapping was removed in Python 3.10 (deprecated since 3.3);
    # the abstract base classes live in collections.abc
    from collections.abc import Mapping

    for k, v in u.items():
        if isinstance(v, Mapping):
            d[k] = update(d.get(k, {}), v)
        else:
            d[k] = v
    return d
+
+
+def create_instance(mydb, tenant_id, instance_dict):
+    # print "Checking that nfvo_tenant_id exists and getting the VIM URI and the VIM tenant_id"
+    # logger.debug("Creating instance...")
+    scenario = instance_dict["scenario"]
+
+    # find main datacenter
+    myvims = {}
+    myvim_threads_id = {}
+    datacenter = instance_dict.get("datacenter")
+    default_wim_account = instance_dict.get("wim_account")
+    default_datacenter_id, vim = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)
+    myvims[default_datacenter_id] = vim
+    myvim_threads_id[default_datacenter_id], _ = get_vim_thread(mydb, tenant_id, default_datacenter_id)
+    tenant = mydb.get_rows_by_id('nfvo_tenants', tenant_id)
+    # myvim_tenant = myvim['tenant_id']
+    rollbackList = []
+
+    # print "Checking that the scenario exists and getting the scenario dictionary"
+    if isinstance(scenario, str):
+        scenarioDict = mydb.get_scenario(scenario, tenant_id, datacenter_vim_id=myvim_threads_id[default_datacenter_id],
+                                         datacenter_id=default_datacenter_id)
+    else:
+        scenarioDict = scenario
+        scenarioDict["uuid"] = None
+
+    # logger.debug(">>>>>> Dictionaries before merging")
+    # logger.debug(">>>>>> InstanceDict:\n{}".format(yaml.safe_dump(instance_dict,default_flow_style=False, width=256)))
+    # logger.debug(">>>>>> ScenarioDict:\n{}".format(yaml.safe_dump(scenarioDict,default_flow_style=False, width=256)))
+
+    db_instance_vnfs = []
+    db_instance_vms = []
+    db_instance_interfaces = []
+    db_instance_sfis = []
+    db_instance_sfs = []
+    db_instance_classifications = []
+    db_instance_sfps = []
+    db_ip_profiles = []
+    db_vim_actions = []
+    uuid_list = []
+    task_index = 0
+    instance_name = instance_dict["name"]
+    instance_uuid = str(uuid4())
+    uuid_list.append(instance_uuid)
+    db_instance_scenario = {
+        "uuid": instance_uuid,
+        "name": instance_name,
+        "tenant_id": tenant_id,
+        "scenario_id": scenarioDict['uuid'],
+        "datacenter_id": default_datacenter_id,
+        # filled bellow 'datacenter_tenant_id'
+        "description": instance_dict.get("description"),
+    }
+    if scenarioDict.get("cloud-config"):
+        db_instance_scenario["cloud_config"] = yaml.safe_dump(scenarioDict["cloud-config"],
+                                                              default_flow_style=True, width=256)
+    instance_action_id = get_task_id()
+    db_instance_action = {
+        "uuid": instance_action_id,   # same uuid for the instance and the action on create
+        "tenant_id": tenant_id,
+        "instance_id": instance_uuid,
+        "description": "CREATE",
+    }
+
+    # Auxiliary dictionaries from x to y
+    sce_net2instance = {}
+    net2task_id = {'scenario': {}}
+    # Mapping between local networks and WIMs
+    wim_usage = {}
+
+    def ip_profile_IM2RO(ip_profile_im):
+        # translate from input format to database format
+        ip_profile_ro = {}
+        if 'subnet-address' in ip_profile_im:
+            ip_profile_ro['subnet_address'] = ip_profile_im['subnet-address']
+        if 'ip-version' in ip_profile_im:
+            ip_profile_ro['ip_version'] = ip_profile_im['ip-version']
+        if 'gateway-address' in ip_profile_im:
+            ip_profile_ro['gateway_address'] = ip_profile_im['gateway-address']
+        if 'dns-address' in ip_profile_im:
+            ip_profile_ro['dns_address'] = ip_profile_im['dns-address']
+            if isinstance(ip_profile_ro['dns_address'], (list, tuple)):
+                ip_profile_ro['dns_address'] = ";".join(ip_profile_ro['dns_address'])
+        if 'dhcp' in ip_profile_im:
+            ip_profile_ro['dhcp_start_address'] = ip_profile_im['dhcp'].get('start-address')
+            ip_profile_ro['dhcp_enabled'] = ip_profile_im['dhcp'].get('enabled', True)
+            ip_profile_ro['dhcp_count'] = ip_profile_im['dhcp'].get('count')
+        return ip_profile_ro
+
+    # logger.debug("Creating instance from scenario-dict:\n%s",
+    #               yaml.safe_dump(scenarioDict, indent=4, default_flow_style=False))
+    try:
+        # 0 check correct parameters
+        for net_name, net_instance_desc in instance_dict.get("networks", {}).items():
+            for scenario_net in scenarioDict['nets']:
+                if net_name == scenario_net.get("name") or net_name == scenario_net.get("osm_id") or net_name == scenario_net.get("uuid"):
+                    break
+            else:
+                raise NfvoException("Invalid scenario network name or id '{}' at instance:networks".format(net_name),
+                                    httperrors.Bad_Request)
+            if "sites" not in net_instance_desc:
+                net_instance_desc["sites"] = [ {} ]
+            site_without_datacenter_field = False
+            for site in net_instance_desc["sites"]:
+                if site.get("datacenter"):
+                    site["datacenter"], _ = get_datacenter_uuid(mydb, tenant_id, site["datacenter"])
+                    if site["datacenter"] not in myvims:
+                        # Add this datacenter to myvims
+                        d, v = get_datacenter_by_name_uuid(mydb, tenant_id, site["datacenter"])
+                        myvims[d] = v
+                        myvim_threads_id[d], _ = get_vim_thread(mydb, tenant_id, site["datacenter"])
+                        site["datacenter"] = d  # change name to id
+                else:
+                    if site_without_datacenter_field:
+                        raise NfvoException("Found more than one entries without datacenter field at "
+                                            "instance:networks:{}:sites".format(net_name), httperrors.Bad_Request)
+                    site_without_datacenter_field = True
+                    site["datacenter"] = default_datacenter_id   # change name to id
+
+        for vnf_name, vnf_instance_desc in instance_dict.get("vnfs",{}).items():
+            for scenario_vnf in scenarioDict['vnfs']:
+                if vnf_name == scenario_vnf['member_vnf_index'] or vnf_name == scenario_vnf['uuid'] or vnf_name == scenario_vnf['name']:
+                    break
+            else:
+                raise NfvoException("Invalid vnf name '{}' at instance:vnfs".format(vnf_name), httperrors.Bad_Request)
+            if "datacenter" in vnf_instance_desc:
+                # Add this datacenter to myvims
+                vnf_instance_desc["datacenter"], _ = get_datacenter_uuid(mydb, tenant_id, vnf_instance_desc["datacenter"])
+                if vnf_instance_desc["datacenter"] not in myvims:
+                    d, v = get_datacenter_by_name_uuid(mydb, tenant_id, vnf_instance_desc["datacenter"])
+                    myvims[d] = v
+                    myvim_threads_id[d], _ = get_vim_thread(mydb, tenant_id, vnf_instance_desc["datacenter"])
+                scenario_vnf["datacenter"] = vnf_instance_desc["datacenter"]
+
+            for net_id, net_instance_desc in vnf_instance_desc.get("networks", {}).items():
+                for scenario_net in scenario_vnf['nets']:
+                    if net_id == scenario_net['osm_id'] or net_id == scenario_net['uuid'] or net_id == scenario_net["name"]:
+                        break
+                else:
+                    raise NfvoException("Invalid net id or name '{}' at instance:vnfs:networks".format(net_id), httperrors.Bad_Request)
+                if net_instance_desc.get("vim-network-name"):
+                    scenario_net["vim-network-name"] = net_instance_desc["vim-network-name"]
+                if net_instance_desc.get("vim-network-id"):
+                    scenario_net["vim-network-id"] = net_instance_desc["vim-network-id"]
+                if net_instance_desc.get("name"):
+                    scenario_net["name"] = net_instance_desc["name"]
+                if 'ip-profile' in net_instance_desc:
+                    ipprofile_db = ip_profile_IM2RO(net_instance_desc['ip-profile'])
+                    if 'ip_profile' not in scenario_net:
+                        scenario_net['ip_profile'] = ipprofile_db
+                    else:
+                        update(scenario_net['ip_profile'], ipprofile_db)
+
+            for vdu_id, vdu_instance_desc in vnf_instance_desc.get("vdus", {}).items():
+                for scenario_vm in scenario_vnf['vms']:
+                    if vdu_id == scenario_vm['osm_id'] or vdu_id == scenario_vm["name"]:
+                        break
+                else:
+                    raise NfvoException("Invalid vdu id or name '{}' at instance:vnfs:vdus".format(vdu_id), httperrors.Bad_Request)
+                scenario_vm["instance_parameters"] = vdu_instance_desc
+                for iface_id, iface_instance_desc in vdu_instance_desc.get("interfaces", {}).items():
+                    for scenario_interface in scenario_vm['interfaces']:
+                        if iface_id == scenario_interface['internal_name'] or iface_id == scenario_interface["external_name"]:
+                            scenario_interface.update(iface_instance_desc)
+                            break
+                    else:
+                        raise NfvoException("Invalid vdu id or name '{}' at instance:vnfs:vdus".format(vdu_id), httperrors.Bad_Request)
+
+        # 0.1 parse cloud-config parameters
+        cloud_config = unify_cloud_config(instance_dict.get("cloud-config"), scenarioDict.get("cloud-config"))
+
+        # 0.2 merge instance information into scenario
+        # Ideally, the operation should be as simple as: update(scenarioDict,instance_dict)
+        # However, this is not possible yet.
+        for net_name, net_instance_desc in instance_dict.get("networks", {}).items():
+            for scenario_net in scenarioDict['nets']:
+                if net_name == scenario_net.get("name") or net_name == scenario_net.get("osm_id") or net_name == scenario_net.get("uuid"):
+                    if "wim_account" in net_instance_desc and net_instance_desc["wim_account"] is not None:
+                        scenario_net["wim_account"] = net_instance_desc["wim_account"]
+                    if 'ip-profile' in net_instance_desc:
+                        ipprofile_db = ip_profile_IM2RO(net_instance_desc['ip-profile'])
+                        if 'ip_profile' not in scenario_net:
+                            scenario_net['ip_profile'] = ipprofile_db
+                        else:
+                            update(scenario_net['ip_profile'], ipprofile_db)
+            for interface in net_instance_desc.get('interfaces', ()):
+                if 'ip_address' in interface:
+                    for vnf in scenarioDict['vnfs']:
+                        if interface['vnf'] == vnf['name']:
+                            for vnf_interface in vnf['interfaces']:
+                                if interface['vnf_interface'] == vnf_interface['external_name']:
+                                    vnf_interface['ip_address'] = interface['ip_address']
+
+        # logger.debug(">>>>>>>> Merged dictionary")
+        # logger.debug("Creating instance scenario-dict MERGED:\n%s",
+        #              yaml.safe_dump(scenarioDict, indent=4, default_flow_style=False))
+
+        # 1. Creating new nets (sce_nets) in the VIM"
+        number_mgmt_networks = 0
+        db_instance_nets = []
+        for sce_net in scenarioDict['nets']:
+            sce_net_uuid = sce_net.get('uuid', sce_net["name"])
+            # get involved datacenters where this network need to be created
+            involved_datacenters = []
+            for sce_vnf in scenarioDict.get("vnfs", ()):
+                vnf_datacenter = sce_vnf.get("datacenter", default_datacenter_id)
+                if vnf_datacenter in involved_datacenters:
+                    continue
+                if sce_vnf.get("interfaces"):
+                    for sce_vnf_ifaces in sce_vnf["interfaces"]:
+                        if sce_vnf_ifaces.get("sce_net_id") == sce_net["uuid"]:
+                            involved_datacenters.append(vnf_datacenter)
+                            break
+            if not involved_datacenters:
+                involved_datacenters.append(default_datacenter_id)
+            target_wim_account = sce_net.get("wim_account", default_wim_account)
+
+            # --> WIM
+            # TODO: use this information during network creation
+            wim_account_id = wim_account_name = None
+            if len(involved_datacenters) > 1 and 'uuid' in sce_net:
+                if target_wim_account is None or target_wim_account is True:  # automatic selection of WIM
+                    # OBS: sce_net without uuid are used internally to VNFs
+                    # and the assumption is that VNFs will not be split among
+                    # different datacenters
+                    wim_account = wim_engine.find_suitable_wim_account(
+                        involved_datacenters, tenant_id)
+                    wim_account_id = wim_account['uuid']
+                    wim_account_name = wim_account['name']
+                    wim_usage[sce_net['uuid']] = wim_account_id
+                elif isinstance(target_wim_account, str):     # manual selection of WIM
+                    wim_account.persist.get_wim_account_by(target_wim_account, tenant_id)
+                    wim_account_id = wim_account['uuid']
+                    wim_account_name = wim_account['name']
+                    wim_usage[sce_net['uuid']] = wim_account_id
+                else:  # not WIM usage
+                    wim_usage[sce_net['uuid']] = False
+            # <-- WIM
+
+            descriptor_net = {}
+            if instance_dict.get("networks"):
+                if sce_net.get("uuid") in instance_dict["networks"]:
+                    descriptor_net = instance_dict["networks"][sce_net["uuid"]]
+                    descriptor_net_name = sce_net["uuid"]
+                elif sce_net.get("osm_id") in instance_dict["networks"]:
+                    descriptor_net = instance_dict["networks"][sce_net["osm_id"]]
+                    descriptor_net_name = sce_net["osm_id"]
+                elif sce_net["name"] in instance_dict["networks"]:
+                    descriptor_net = instance_dict["networks"][sce_net["name"]]
+                    descriptor_net_name = sce_net["name"]
+            net_name = descriptor_net.get("vim-network-name")
+            # add datacenters from instantiation parameters
+            if descriptor_net.get("sites"):
+                for site in descriptor_net["sites"]:
+                    if site.get("datacenter") and site["datacenter"] not in involved_datacenters:
+                        involved_datacenters.append(site["datacenter"])
+            sce_net2instance[sce_net_uuid] = {}
+            net2task_id['scenario'][sce_net_uuid] = {}
+
+            use_network = None
+            related_network = None
+            if descriptor_net.get("use-network"):
+                target_instance_nets = mydb.get_rows(
+                    SELECT="related",
+                    FROM="instance_nets",
+                    WHERE={"instance_scenario_id": descriptor_net["use-network"]["instance_scenario_id"],
+                           "osm_id":  descriptor_net["use-network"]["osm_id"]},
+                )
+                if not target_instance_nets:
+                    raise NfvoException(
+                        "Cannot find the target network at instance:networks[{}]:use-network".format(descriptor_net_name),
+                        httperrors.Bad_Request)
+                else:
+                    use_network = target_instance_nets[0]["related"]
+
+            if sce_net["external"]:
+                number_mgmt_networks += 1
+
+            for datacenter_id in involved_datacenters:
+                netmap_use = None
+                netmap_create = None
+                if descriptor_net.get("sites"):
+                    for site in descriptor_net["sites"]:
+                        if site.get("datacenter") == datacenter_id:
+                            netmap_use = site.get("netmap-use")
+                            netmap_create = site.get("netmap-create")
+                            break
+
+                vim = myvims[datacenter_id]
+                myvim_thread_id = myvim_threads_id[datacenter_id]
+
+                net_type = sce_net['type']
+                net_vim_name = None
+                lookfor_filter = {'admin_state_up': True, 'status': 'ACTIVE'}  # 'shared': True
+
+                if not net_name:
+                    if sce_net["external"]:
+                        net_name = sce_net["name"]
+                    else:
+                        net_name = "{}-{}".format(instance_name, sce_net["name"])
+                        net_name = net_name[:255]     # limit length
+
+                if netmap_use or netmap_create:
+                    create_network = False
+                    lookfor_network = False
+                    if netmap_use:
+                        lookfor_network = True
+                        if utils.check_valid_uuid(netmap_use):
+                            lookfor_filter["id"] = netmap_use
+                        else:
+                            lookfor_filter["name"] = netmap_use
+                    if netmap_create:
+                        create_network = True
+                        net_vim_name = net_name
+                        if isinstance(netmap_create, str):
+                            net_vim_name = netmap_create
+                elif sce_net.get("vim_network_name"):
+                    create_network = False
+                    lookfor_network = True
+                    lookfor_filter["name"] = sce_net.get("vim_network_name")
+                elif sce_net["external"]:
+                    if sce_net.get('vim_id'):
+                        # there is a netmap at datacenter_nets database   # TODO REVISE!!!!
+                        create_network = False
+                        lookfor_network = True
+                        lookfor_filter["id"] = sce_net['vim_id']
+                    elif vim["config"].get("management_network_id") or vim["config"].get("management_network_name"):
+                        if number_mgmt_networks > 1:
+                            raise NfvoException("Found several VLD of type mgmt. "
+                                                "You must specify which vim-network must be used for each one",
+                                                httperrors.Bad_Request)
+                        create_network = False
+                        lookfor_network = True
+                        if vim["config"].get("management_network_id"):
+                            lookfor_filter["id"] = vim["config"]["management_network_id"]
+                        else:
+                            lookfor_filter["name"] = vim["config"]["management_network_name"]
+                    else:
+                        # There is not a netmap, look at datacenter for a net with this name and create if not found
+                        create_network = True
+                        lookfor_network = True
+                        lookfor_filter["name"] = sce_net["name"]
+                        net_vim_name = sce_net["name"]
+                else:
+                    net_vim_name = net_name
+                    create_network = True
+                    lookfor_network = False
+
+                task_extra = {}
+                if create_network:
+                    task_action = "CREATE"
+                    task_extra["params"] = (net_vim_name, net_type, sce_net.get('ip_profile', None), wim_account_name)
+                    if lookfor_network:
+                        task_extra["find"] = (lookfor_filter,)
+                elif lookfor_network:
+                    task_action = "FIND"
+                    task_extra["params"] = (lookfor_filter,)
+
+                # fill database content
+                net_uuid = str(uuid4())
+                uuid_list.append(net_uuid)
+                sce_net2instance[sce_net_uuid][datacenter_id] = net_uuid
+                if not related_network:   # all db_instance_nets will have same related
+                    related_network = use_network or net_uuid
+                db_net = {
+                    "uuid": net_uuid,
+                    "osm_id": sce_net.get("osm_id") or sce_net["name"],
+                    "related": related_network,
+                    'vim_net_id': None,
+                    "vim_name": net_vim_name,
+                    "instance_scenario_id": instance_uuid,
+                    "sce_net_id": sce_net.get("uuid"),
+                    "created": create_network,
+                    'datacenter_id': datacenter_id,
+                    'datacenter_tenant_id': myvim_thread_id,
+                    'status': 'BUILD' #  if create_network else "ACTIVE"
+                }
+                db_instance_nets.append(db_net)
+                db_vim_action = {
+                    "instance_action_id": instance_action_id,
+                    "status": "SCHEDULED",
+                    "task_index": task_index,
+                    "datacenter_vim_id": myvim_thread_id,
+                    "action": task_action,
+                    "item": "instance_nets",
+                    "item_id": net_uuid,
+                    "related": related_network,
+                    "extra": yaml.safe_dump(task_extra, default_flow_style=True, width=256)
+                }
+                net2task_id['scenario'][sce_net_uuid][datacenter_id] = task_index
+                task_index += 1
+                db_vim_actions.append(db_vim_action)
+
+            if 'ip_profile' in sce_net:
+                db_ip_profile={
+                    'instance_net_id': net_uuid,
+                    'ip_version': sce_net['ip_profile']['ip_version'],
+                    'subnet_address': sce_net['ip_profile']['subnet_address'],
+                    'gateway_address': sce_net['ip_profile']['gateway_address'],
+                    'dns_address': sce_net['ip_profile']['dns_address'],
+                    'dhcp_enabled': sce_net['ip_profile']['dhcp_enabled'],
+                    'dhcp_start_address': sce_net['ip_profile']['dhcp_start_address'],
+                    'dhcp_count': sce_net['ip_profile']['dhcp_count'],
+                }
+                db_ip_profiles.append(db_ip_profile)
+
+        # Create VNFs
+        vnf_params = {
+            "default_datacenter_id": default_datacenter_id,
+            "myvim_threads_id": myvim_threads_id,
+            "instance_uuid": instance_uuid,
+            "instance_name": instance_name,
+            "instance_action_id": instance_action_id,
+            "myvims": myvims,
+            "cloud_config": cloud_config,
+            "RO_pub_key": tenant[0].get('RO_pub_key'),
+            "instance_parameters": instance_dict,
+        }
+        vnf_params_out = {
+            "task_index": task_index,
+            "uuid_list": uuid_list,
+            "db_instance_nets": db_instance_nets,
+            "db_vim_actions": db_vim_actions,
+            "db_ip_profiles": db_ip_profiles,
+            "db_instance_vnfs": db_instance_vnfs,
+            "db_instance_vms": db_instance_vms,
+            "db_instance_interfaces": db_instance_interfaces,
+            "net2task_id": net2task_id,
+            "sce_net2instance": sce_net2instance,
+        }
+        # sce_vnf_list = sorted(scenarioDict['vnfs'], key=lambda k: k['name'])
+        for sce_vnf in scenarioDict.get('vnfs', ()):  # sce_vnf_list:
+            instantiate_vnf(mydb, sce_vnf, vnf_params, vnf_params_out, rollbackList)
+        task_index = vnf_params_out["task_index"]
+        uuid_list = vnf_params_out["uuid_list"]
+
+        # Create VNFFGs
+        # task_depends_on = []
+        for vnffg in scenarioDict.get('vnffgs', ()):
+            for rsp in vnffg['rsps']:
+                sfs_created = []
+                for cp in rsp['connection_points']:
+                    count = mydb.get_rows(
+                            SELECT='vms.count',
+                            FROM="vms join interfaces on vms.uuid=interfaces.vm_id join sce_rsp_hops as h "
+                                 "on interfaces.uuid=h.ingress_interface_id",
+                            WHERE={'h.uuid': cp['uuid']})[0]['count']
+                    instance_vnf = next((item for item in db_instance_vnfs if item['sce_vnf_id'] == cp['sce_vnf_id']), None)
+                    instance_vms = [item for item in db_instance_vms if item['instance_vnf_id'] == instance_vnf['uuid']]
+                    dependencies = []
+                    for instance_vm in instance_vms:
+                        action = next((item for item in db_vim_actions if item['item_id'] == instance_vm['uuid']), None)
+                        if action:
+                            dependencies.append(action['task_index'])
+                        # TODO: throw exception if count != len(instance_vms)
+                        # TODO: and action shouldn't ever be None
+                    sfis_created = []
+                    for i in range(count):
+                        # create sfis
+                        sfi_uuid = str(uuid4())
+                        extra_params = {
+                            "ingress_interface_id": cp["ingress_interface_id"],
+                            "egress_interface_id": cp["egress_interface_id"]
+                        }
+                        uuid_list.append(sfi_uuid)
+                        db_sfi = {
+                            "uuid": sfi_uuid,
+                            "related": sfi_uuid,
+                            "instance_scenario_id": instance_uuid,
+                            'sce_rsp_hop_id': cp['uuid'],
+                            'datacenter_id': datacenter_id,
+                            'datacenter_tenant_id': myvim_thread_id,
+                            "vim_sfi_id": None, # vim thread will populate
+                        }
+                        db_instance_sfis.append(db_sfi)
+                        db_vim_action = {
+                            "instance_action_id": instance_action_id,
+                            "task_index": task_index,
+                            "datacenter_vim_id": myvim_thread_id,
+                            "action": "CREATE",
+                            "status": "SCHEDULED",
+                            "item": "instance_sfis",
+                            "item_id": sfi_uuid,
+                            "related": sfi_uuid,
+                            "extra": yaml.safe_dump({"params": extra_params, "depends_on": [dependencies[i]]},
+                                                    default_flow_style=True, width=256)
+                        }
+                        sfis_created.append(task_index)
+                        task_index += 1
+                        db_vim_actions.append(db_vim_action)
+                    # create sfs
+                    sf_uuid = str(uuid4())
+                    uuid_list.append(sf_uuid)
+                    db_sf = {
+                        "uuid": sf_uuid,
+                        "related": sf_uuid,
+                        "instance_scenario_id": instance_uuid,
+                        'sce_rsp_hop_id': cp['uuid'],
+                        'datacenter_id': datacenter_id,
+                        'datacenter_tenant_id': myvim_thread_id,
+                        "vim_sf_id": None, # vim thread will populate
+                    }
+                    db_instance_sfs.append(db_sf)
+                    db_vim_action = {
+                        "instance_action_id": instance_action_id,
+                        "task_index": task_index,
+                        "datacenter_vim_id": myvim_thread_id,
+                        "action": "CREATE",
+                        "status": "SCHEDULED",
+                        "item": "instance_sfs",
+                        "item_id": sf_uuid,
+                        "related": sf_uuid,
+                        "extra": yaml.safe_dump({"params": "", "depends_on": sfis_created},
+                                                default_flow_style=True, width=256)
+                    }
+                    sfs_created.append(task_index)
+                    task_index += 1
+                    db_vim_actions.append(db_vim_action)
+                classifier = rsp['classifier']
+
+                # TODO the following ~13 lines can be reused for the sfi case
+                count = mydb.get_rows(
+                        SELECT=('vms.count'),
+                        FROM="vms join interfaces on vms.uuid=interfaces.vm_id join sce_classifiers as c on interfaces.uuid=c.interface_id",
+                        WHERE={'c.uuid': classifier['uuid']})[0]['count']
+                instance_vnf = next((item for item in db_instance_vnfs if item['sce_vnf_id'] == classifier['sce_vnf_id']), None)
+                instance_vms = [item for item in db_instance_vms if item['instance_vnf_id'] == instance_vnf['uuid']]
+                dependencies = []
+                for instance_vm in instance_vms:
+                    action = next((item for item in db_vim_actions if item['item_id'] == instance_vm['uuid']), None)
+                    if action:
+                        dependencies.append(action['task_index'])
+                    # TODO: throw exception if count != len(instance_vms)
+                    # TODO: and action shouldn't ever be None
+                classifications_created = []
+                for i in range(count):
+                    for match in classifier['matches']:
+                        # create classifications
+                        classification_uuid = str(uuid4())
+                        uuid_list.append(classification_uuid)
+                        db_classification = {
+                            "uuid": classification_uuid,
+                            "related": classification_uuid,
+                            "instance_scenario_id": instance_uuid,
+                            'sce_classifier_match_id': match['uuid'],
+                            'datacenter_id': datacenter_id,
+                            'datacenter_tenant_id': myvim_thread_id,
+                            "vim_classification_id": None, # vim thread will populate
+                        }
+                        db_instance_classifications.append(db_classification)
+                        classification_params = {
+                            "ip_proto": match["ip_proto"],
+                            "source_ip": match["source_ip"],
+                            "destination_ip": match["destination_ip"],
+                            "source_port": match["source_port"],
+                            "destination_port": match["destination_port"]
+                        }
+                        db_vim_action = {
+                            "instance_action_id": instance_action_id,
+                            "task_index": task_index,
+                            "datacenter_vim_id": myvim_thread_id,
+                            "action": "CREATE",
+                            "status": "SCHEDULED",
+                            "item": "instance_classifications",
+                            "item_id": classification_uuid,
+                            "related": classification_uuid,
+                            "extra": yaml.safe_dump({"params": classification_params, "depends_on": [dependencies[i]]},
+                                                    default_flow_style=True, width=256)
+                        }
+                        classifications_created.append(task_index)
+                        task_index += 1
+                        db_vim_actions.append(db_vim_action)
+
+                # create sfps
+                sfp_uuid = str(uuid4())
+                uuid_list.append(sfp_uuid)
+                db_sfp = {
+                    "uuid": sfp_uuid,
+                    "related": sfp_uuid,
+                    "instance_scenario_id": instance_uuid,
+                    'sce_rsp_id': rsp['uuid'],
+                    'datacenter_id': datacenter_id,
+                    'datacenter_tenant_id': myvim_thread_id,
+                    "vim_sfp_id": None, # vim thread will populate
+                }
+                db_instance_sfps.append(db_sfp)
+                db_vim_action = {
+                    "instance_action_id": instance_action_id,
+                    "task_index": task_index,
+                    "datacenter_vim_id": myvim_thread_id,
+                    "action": "CREATE",
+                    "status": "SCHEDULED",
+                    "item": "instance_sfps",
+                    "item_id": sfp_uuid,
+                    "related": sfp_uuid,
+                    "extra": yaml.safe_dump({"params": "", "depends_on": sfs_created + classifications_created},
+                                            default_flow_style=True, width=256)
+                }
+                task_index += 1
+                db_vim_actions.append(db_vim_action)
+        db_instance_action["number_tasks"] = task_index
+
+        # --> WIM
+        logger.debug('wim_usage:\n%s\n\n', pformat(wim_usage))
+        wan_links = wim_engine.derive_wan_links(wim_usage, db_instance_nets, tenant_id)
+        wim_actions = wim_engine.create_actions(wan_links)
+        wim_actions, db_instance_action = (
+            wim_engine.incorporate_actions(wim_actions, db_instance_action))
+        # <-- WIM
+
+        scenarioDict["datacenter2tenant"] = myvim_threads_id
+
+        db_instance_scenario['datacenter_tenant_id'] = myvim_threads_id[default_datacenter_id]
+        db_instance_scenario['datacenter_id'] = default_datacenter_id
+        db_tables=[
+            {"instance_scenarios": db_instance_scenario},
+            {"instance_vnfs": db_instance_vnfs},
+            {"instance_nets": db_instance_nets},
+            {"ip_profiles": db_ip_profiles},
+            {"instance_vms": db_instance_vms},
+            {"instance_interfaces": db_instance_interfaces},
+            {"instance_actions": db_instance_action},
+            {"instance_sfis": db_instance_sfis},
+            {"instance_sfs": db_instance_sfs},
+            {"instance_classifications": db_instance_classifications},
+            {"instance_sfps": db_instance_sfps},
+            {"instance_wim_nets": wan_links},
+            {"vim_wim_actions": db_vim_actions + wim_actions}
+        ]
+
+        logger.debug("create_instance done DB tables: %s",
+                    yaml.safe_dump(db_tables, indent=4, default_flow_style=False) )
+        mydb.new_rows(db_tables, uuid_list)
+        for myvim_thread_id in myvim_threads_id.values():
+            vim_threads["running"][myvim_thread_id].insert_task(db_vim_actions)
+
+        wim_engine.dispatch(wim_actions)
+
+        returned_instance = mydb.get_instance_scenario(instance_uuid)
+        returned_instance["action_id"] = instance_action_id
+        return returned_instance
+    except (NfvoException, vimconn.vimconnException, wimconn.WimConnectorError, db_base_Exception) as e:
+        message = rollback(mydb, myvims, rollbackList)
+        if isinstance(e, db_base_Exception):
+            error_text = "database Exception"
+        elif isinstance(e, vimconn.vimconnException):
+            error_text = "VIM Exception"
+        elif isinstance(e, wimconn.WimConnectorError):
+            error_text = "WIM Exception"
+        else:
+            error_text = "Exception"
+        error_text += " {} {}. {}".format(type(e).__name__, str(e), message)
+        # logger.error("create_instance: %s", error_text)
+        logger.exception(e)
+        raise NfvoException(error_text, e.http_code)
+
+
+def instantiate_vnf(mydb, sce_vnf, params, params_out, rollbackList):
+    default_datacenter_id = params["default_datacenter_id"]
+    myvim_threads_id = params["myvim_threads_id"]
+    instance_uuid = params["instance_uuid"]
+    instance_name = params["instance_name"]
+    instance_action_id = params["instance_action_id"]
+    myvims = params["myvims"]
+    cloud_config = params["cloud_config"]
+    RO_pub_key = params["RO_pub_key"]
+
+    task_index = params_out["task_index"]
+    uuid_list = params_out["uuid_list"]
+    db_instance_nets = params_out["db_instance_nets"]
+    db_vim_actions = params_out["db_vim_actions"]
+    db_ip_profiles = params_out["db_ip_profiles"]
+    db_instance_vnfs = params_out["db_instance_vnfs"]
+    db_instance_vms = params_out["db_instance_vms"]
+    db_instance_interfaces = params_out["db_instance_interfaces"]
+    net2task_id = params_out["net2task_id"]
+    sce_net2instance = params_out["sce_net2instance"]
+
+    vnf_net2instance = {}
+
+    # 2. Creating new nets (vnf internal nets) in the VIM"
+    # For each vnf net, we create it and we add it to instanceNetlist.
+    if sce_vnf.get("datacenter"):
+        datacenter_id = sce_vnf["datacenter"]
+        myvim_thread_id = myvim_threads_id[sce_vnf["datacenter"]]
+    else:
+        datacenter_id = default_datacenter_id
+        myvim_thread_id = myvim_threads_id[default_datacenter_id]
+    for net in sce_vnf['nets']:
+        # TODO revis
+        # descriptor_net = instance_dict.get("vnfs", {}).get(sce_vnf["name"], {})
+        # net_name = descriptor_net.get("name")
+        net_name = None
+        if not net_name:
+            net_name = "{}-{}".format(instance_name, net["name"])
+            net_name = net_name[:255]  # limit length
+        net_type = net['type']
+
+        if sce_vnf['uuid'] not in vnf_net2instance:
+            vnf_net2instance[sce_vnf['uuid']] = {}
+        if sce_vnf['uuid'] not in net2task_id:
+            net2task_id[sce_vnf['uuid']] = {}
+        net2task_id[sce_vnf['uuid']][net['uuid']] = task_index
+
+        # fill database content
+        net_uuid = str(uuid4())
+        uuid_list.append(net_uuid)
+        vnf_net2instance[sce_vnf['uuid']][net['uuid']] = net_uuid
+        db_net = {
+            "uuid": net_uuid,
+            "related": net_uuid,
+            'vim_net_id': None,
+            "vim_name": net_name,
+            "instance_scenario_id": instance_uuid,
+            "net_id": net["uuid"],
+            "created": True,
+            'datacenter_id': datacenter_id,
+            'datacenter_tenant_id': myvim_thread_id,
+        }
+        db_instance_nets.append(db_net)
+
+        lookfor_filter = {}
+        if net.get("vim-network-name"):
+            lookfor_filter["name"] = net["vim-network-name"]
+        if net.get("vim-network-id"):
+            lookfor_filter["id"] = net["vim-network-id"]
+        if lookfor_filter:
+            task_action = "FIND"
+            task_extra = {"params": (lookfor_filter,)}
+        else:
+            task_action = "CREATE"
+            task_extra = {"params": (net_name, net_type, net.get('ip_profile', None))}
+
+        db_vim_action = {
+            "instance_action_id": instance_action_id,
+            "task_index": task_index,
+            "datacenter_vim_id": myvim_thread_id,
+            "status": "SCHEDULED",
+            "action": task_action,
+            "item": "instance_nets",
+            "item_id": net_uuid,
+            "related": net_uuid,
+            "extra": yaml.safe_dump(task_extra, default_flow_style=True, width=256)
+        }
+        task_index += 1
+        db_vim_actions.append(db_vim_action)
+
+        if 'ip_profile' in net:
+            db_ip_profile = {
+                'instance_net_id': net_uuid,
+                'ip_version': net['ip_profile']['ip_version'],
+                'subnet_address': net['ip_profile']['subnet_address'],
+                'gateway_address': net['ip_profile']['gateway_address'],
+                'dns_address': net['ip_profile']['dns_address'],
+                'dhcp_enabled': net['ip_profile']['dhcp_enabled'],
+                'dhcp_start_address': net['ip_profile']['dhcp_start_address'],
+                'dhcp_count': net['ip_profile']['dhcp_count'],
+            }
+            db_ip_profiles.append(db_ip_profile)
+
+    # print "vnf_net2instance:"
+    # print yaml.safe_dump(vnf_net2instance, indent=4, default_flow_style=False)
+
+    # 3. Creating new vm instances in the VIM
+    # myvim.new_vminstance(self,vimURI,tenant_id,name,description,image_id,flavor_id,net_dict)
+    ssh_access = None
+    if sce_vnf.get('mgmt_access'):
+        ssh_access = sce_vnf['mgmt_access'].get('config-access', {}).get('ssh-access')
+    vnf_availability_zones = []
+    for vm in sce_vnf.get('vms'):
+        vm_av = vm.get('availability_zone')
+        if vm_av and vm_av not in vnf_availability_zones:
+            vnf_availability_zones.append(vm_av)
+
+    # check if there is enough availability zones available at vim level.
+    if myvims[datacenter_id].availability_zone and vnf_availability_zones:
+        if len(vnf_availability_zones) > len(myvims[datacenter_id].availability_zone):
+            raise NfvoException('Not enough availability zones at VIM for this deployment', httperrors.Bad_Request)
+
+    if sce_vnf.get("datacenter"):
+        vim = myvims[sce_vnf["datacenter"]]
+        myvim_thread_id = myvim_threads_id[sce_vnf["datacenter"]]
+        datacenter_id = sce_vnf["datacenter"]
+    else:
+        vim = myvims[default_datacenter_id]
+        myvim_thread_id = myvim_threads_id[default_datacenter_id]
+        datacenter_id = default_datacenter_id
+    sce_vnf["datacenter_id"] = datacenter_id
+    i = 0
+
+    vnf_uuid = str(uuid4())
+    uuid_list.append(vnf_uuid)
+    db_instance_vnf = {
+        'uuid': vnf_uuid,
+        'instance_scenario_id': instance_uuid,
+        'vnf_id': sce_vnf['vnf_id'],
+        'sce_vnf_id': sce_vnf['uuid'],
+        'datacenter_id': datacenter_id,
+        'datacenter_tenant_id': myvim_thread_id,
+    }
+    db_instance_vnfs.append(db_instance_vnf)
+
+    for vm in sce_vnf['vms']:
+        # skip PDUs
+        if vm.get("pdu_type"):
+            continue
+
+        myVMDict = {}
+        sce_vnf_name = sce_vnf['member_vnf_index'] if sce_vnf['member_vnf_index'] else sce_vnf['name']
+        myVMDict['name'] = "{}-{}-{}".format(instance_name[:64], sce_vnf_name[:64], vm["name"][:64])
+        myVMDict['description'] = myVMDict['name'][0:99]
+        #                if not startvms:
+        #                    myVMDict['start'] = "no"
+        if vm.get("instance_parameters") and vm["instance_parameters"].get("name"):
+            myVMDict['name'] = vm["instance_parameters"].get("name")
+        myVMDict['name'] = myVMDict['name'][0:255]  # limit name length
+        # create image at vim in case it not exist
+        image_uuid = vm['image_id']
+        if vm.get("image_list"):
+            for alternative_image in vm["image_list"]:
+                if alternative_image["vim_type"] == vim["config"]["_vim_type_internal"]:
+                    image_uuid = alternative_image['image_id']
+                    break
+        image_dict = mydb.get_table_by_uuid_name("images", image_uuid)
+        image_id = create_or_use_image(mydb, {datacenter_id: vim}, image_dict, [], True)
+        vm['vim_image_id'] = image_id
+
+        # create flavor at vim in case it not exist
+        flavor_dict = mydb.get_table_by_uuid_name("flavors", vm['flavor_id'])
+        if flavor_dict['extended'] != None:
+            flavor_dict['extended'] = yaml.load(flavor_dict['extended'], Loader=yaml.Loader)
+        flavor_id = create_or_use_flavor(mydb, {datacenter_id: vim}, flavor_dict, rollbackList, True)
+
+        # Obtain information for additional disks
+        extended_flavor_dict = mydb.get_rows(FROM='datacenters_flavors', SELECT=('extended',),
+                                             WHERE={'vim_id': flavor_id})
+        if not extended_flavor_dict:
+            raise NfvoException("flavor '{}' not found".format(flavor_id), httperrors.Not_Found)
+
+        # extended_flavor_dict_yaml = yaml.load(extended_flavor_dict[0], Loader=yaml.Loader)
+        myVMDict['disks'] = None
+        extended_info = extended_flavor_dict[0]['extended']
+        if extended_info != None:
+            extended_flavor_dict_yaml = yaml.load(extended_info, Loader=yaml.Loader)
+            if 'disks' in extended_flavor_dict_yaml:
+                myVMDict['disks'] = extended_flavor_dict_yaml['disks']
+                if vm.get("instance_parameters") and vm["instance_parameters"].get("devices"):
+                    for disk in myVMDict['disks']:
+                        if disk.get("name") in vm["instance_parameters"]["devices"]:
+                            disk.update(vm["instance_parameters"]["devices"][disk.get("name")])
+
+        vm['vim_flavor_id'] = flavor_id
+        myVMDict['imageRef'] = vm['vim_image_id']
+        myVMDict['flavorRef'] = vm['vim_flavor_id']
+        myVMDict['availability_zone'] = vm.get('availability_zone')
+        myVMDict['networks'] = []
+        task_depends_on = []
+        # TODO ALF. connect_mgmt_interfaces. Connect management interfaces if this is true
+        is_management_vm = False
+        db_vm_ifaces = []
+        for iface in vm['interfaces']:
+            netDict = {}
+            if iface['type'] == "data":
+                netDict['type'] = iface['model']
+            elif "model" in iface and iface["model"] != None:
+                netDict['model'] = iface['model']
+            # TODO in future, remove this because mac_address will not be set, and the type of PV,VF
+            # is obtained from iterface table model
+            # discover type of interface looking at flavor
+            for numa in flavor_dict.get('extended', {}).get('numas', []):
+                for flavor_iface in numa.get('interfaces', []):
+                    if flavor_iface.get('name') == iface['internal_name']:
+                        if flavor_iface['dedicated'] == 'yes':
+                            netDict['type'] = "PF"  # passthrough
+                        elif flavor_iface['dedicated'] == 'no':
+                            netDict['type'] = "VF"  # siov
+                        elif flavor_iface['dedicated'] == 'yes:sriov':
+                            netDict['type'] = "VFnotShared"  # sriov but only one sriov on the PF
+                        netDict["mac_address"] = flavor_iface.get("mac_address")
+                        break
+            netDict["use"] = iface['type']
+            if netDict["use"] == "data" and not netDict.get("type"):
+                # print "netDict", netDict
+                # print "iface", iface
+                e_text = "Cannot determine the interface type PF or VF of VNF '{}' VM '{}' iface '{}'".format(
+                    sce_vnf['name'], vm['name'], iface['internal_name'])
+                if flavor_dict.get('extended') == None:
+                    raise NfvoException(e_text + "After database migration some information is not available. \
+                            Try to delete and create the scenarios and VNFs again", httperrors.Conflict)
+                else:
+                    raise NfvoException(e_text, httperrors.Internal_Server_Error)
+            if netDict["use"] == "mgmt":
+                is_management_vm = True
+                netDict["type"] = "virtual"
+            if netDict["use"] == "bridge":
+                netDict["type"] = "virtual"
+            if iface.get("vpci"):
+                netDict['vpci'] = iface['vpci']
+            if iface.get("mac"):
+                netDict['mac_address'] = iface['mac']
+            if iface.get("mac_address"):
+                netDict['mac_address'] = iface['mac_address']
+            if iface.get("ip_address"):
+                netDict['ip_address'] = iface['ip_address']
+            if iface.get("port-security") is not None:
+                netDict['port_security'] = iface['port-security']
+            if iface.get("floating-ip") is not None:
+                netDict['floating_ip'] = iface['floating-ip']
+            netDict['name'] = iface['internal_name']
+            if iface['net_id'] is None:
+                for vnf_iface in sce_vnf["interfaces"]:
+                    # print iface
+                    # print vnf_iface
+                    if vnf_iface['interface_id'] == iface['uuid']:
+                        netDict['net_id'] = "TASK-{}".format(
+                            net2task_id['scenario'][vnf_iface['sce_net_id']][datacenter_id])
+                        instance_net_id = sce_net2instance[vnf_iface['sce_net_id']][datacenter_id]
+                        task_depends_on.append(net2task_id['scenario'][vnf_iface['sce_net_id']][datacenter_id])
+                        break
+            else:
+                netDict['net_id'] = "TASK-{}".format(net2task_id[sce_vnf['uuid']][iface['net_id']])
+                instance_net_id = vnf_net2instance[sce_vnf['uuid']][iface['net_id']]
+                task_depends_on.append(net2task_id[sce_vnf['uuid']][iface['net_id']])
+            # skip bridge ifaces not connected to any net
+            if 'net_id' not in netDict or netDict['net_id'] == None:
+                continue
+            myVMDict['networks'].append(netDict)
+            db_vm_iface = {
+                # "uuid"
+                # 'instance_vm_id': instance_vm_uuid,
+                "instance_net_id": instance_net_id,
+                'interface_id': iface['uuid'],
+                # 'vim_interface_id': ,
+                'type': 'external' if iface['external_name'] is not None else 'internal',
+                'ip_address': iface.get('ip_address'),
+                'mac_address': iface.get('mac'),
+                'floating_ip': int(iface.get('floating-ip', False)),
+                'port_security': int(iface.get('port-security', True))
+            }
+            db_vm_ifaces.append(db_vm_iface)
+        # print ">>>>>>>>>>>>>>>>>>>>>>>>>>>"
+        # print myVMDict['name']
+        # print "networks", yaml.safe_dump(myVMDict['networks'], indent=4, default_flow_style=False)
+        # print "interfaces", yaml.safe_dump(vm['interfaces'], indent=4, default_flow_style=False)
+        # print ">>>>>>>>>>>>>>>>>>>>>>>>>>>"
+
+        # We add the RO key to cloud_config if vnf will need ssh access
+        cloud_config_vm = cloud_config
+        if is_management_vm and params["instance_parameters"].get("mgmt_keys"):
+            cloud_config_vm = unify_cloud_config({"key-pairs": params["instance_parameters"]["mgmt_keys"]},
+                                                  cloud_config_vm)
+
+        if vm.get("instance_parameters") and "mgmt_keys" in vm["instance_parameters"]:
+            if vm["instance_parameters"]["mgmt_keys"]:
+                cloud_config_vm = unify_cloud_config({"key-pairs": vm["instance_parameters"]["mgmt_keys"]},
+                                                     cloud_config_vm)
+            if RO_pub_key:
+                cloud_config_vm = unify_cloud_config(cloud_config_vm, {"key-pairs": [RO_pub_key]})
+        if vm.get("boot_data"):
+            cloud_config_vm = unify_cloud_config(vm["boot_data"], cloud_config_vm)
+
+        if myVMDict.get('availability_zone'):
+            av_index = vnf_availability_zones.index(myVMDict['availability_zone'])
+        else:
+            av_index = None
+        for vm_index in range(0, vm.get('count', 1)):
+            vm_name = myVMDict['name'] + "-" + str(vm_index+1)
+            task_params = (vm_name, myVMDict['description'], myVMDict.get('start', None),
+                           myVMDict['imageRef'], myVMDict['flavorRef'], myVMDict['networks'], cloud_config_vm,
+                           myVMDict['disks'], av_index, vnf_availability_zones)
+            # put interface uuid back to scenario[vnfs][vms[[interfaces]
+            for net in myVMDict['networks']:
+                if "vim_id" in net:
+                    for iface in vm['interfaces']:
+                        if net["name"] == iface["internal_name"]:
+                            iface["vim_id"] = net["vim_id"]
+                            break
+            vm_uuid = str(uuid4())
+            uuid_list.append(vm_uuid)
+            db_vm = {
+                "uuid": vm_uuid,
+                "related": vm_uuid,
+                'instance_vnf_id': vnf_uuid,
+                # TODO delete "vim_vm_id": vm_id,
+                "vm_id": vm["uuid"],
+                "vim_name": vm_name,
+                # "status":
+            }
+            db_instance_vms.append(db_vm)
+
+            iface_index = 0
+            for db_vm_iface in db_vm_ifaces:
+                iface_uuid = str(uuid4())
+                uuid_list.append(iface_uuid)
+                db_vm_iface_instance = {
+                    "uuid": iface_uuid,
+                    "instance_vm_id": vm_uuid
+                }
+                db_vm_iface_instance.update(db_vm_iface)
+                if db_vm_iface_instance.get("ip_address"):  # increment ip_address
+                    ip = db_vm_iface_instance.get("ip_address")
+                    i = ip.rfind(".")
+                    if i > 0:
+                        try:
+                            i += 1
+                            ip = ip[i:] + str(int(ip[:i]) + 1)
+                            db_vm_iface_instance["ip_address"] = ip
+                        except:
+                            db_vm_iface_instance["ip_address"] = None
+                db_instance_interfaces.append(db_vm_iface_instance)
+                myVMDict['networks'][iface_index]["uuid"] = iface_uuid
+                iface_index += 1
+
+            db_vim_action = {
+                "instance_action_id": instance_action_id,
+                "task_index": task_index,
+                "datacenter_vim_id": myvim_thread_id,
+                "action": "CREATE",
+                "status": "SCHEDULED",
+                "item": "instance_vms",
+                "item_id": vm_uuid,
+                "related": vm_uuid,
+                "extra": yaml.safe_dump({"params": task_params, "depends_on": task_depends_on},
+                                        default_flow_style=True, width=256)
+            }
+            task_index += 1
+            db_vim_actions.append(db_vim_action)
+    params_out["task_index"] = task_index
+    params_out["uuid_list"] = uuid_list
+
+
def delete_instance(mydb, tenant_id, instance_id):
    """Delete a deployed scenario instance.

    The instance is removed from the database first (ON CASCADE rules clean up
    the dependent rows) and then one asynchronous DELETE task per VIM element
    (SFPs, classifications, SFs, SFIs, VMs, nets) is scheduled for the
    per-datacenter vim_threads to execute against the VIMs. Task ordering is
    expressed through "depends_on" lists of task indexes.

    :param mydb: database connector object
    :param tenant_id: NFVO tenant used to look up the instance
    :param instance_id: uuid or name of the instance scenario
    :return: result text containing the action_id of the scheduled deletion;
        it also lists any elements that could not be scheduled because their
        datacenter was not found
    """
    # print "Checking that the instance_id exists and getting the instance dictionary"
    instanceDict = mydb.get_instance_scenario(instance_id, tenant_id)
    # print yaml.safe_dump(instanceDict, indent=4, default_flow_style=False)
    # from here on, use the tenant the instance is actually stored under
    tenant_id = instanceDict["tenant_id"]

    # --> WIM
    # We need to retrieve the WIM Actions now, before the instance_scenario is
    # deleted. The reason for that is that: ON CASCADE rules will delete the
    # instance_wim_nets record in the database
    wim_actions = wim_engine.delete_actions(instance_scenario_id=instance_id)
    # <-- WIM

    # print "Checking that nfvo_tenant_id exists and getting the VIM URI and the VIM tenant_id"
    # 1. Delete from Database
    message = mydb.delete_instance_scenario(instance_id, tenant_id)

    # 2. delete from VIM
    error_msg = ""
    myvims = {}               # VIM connector cache, keyed by (datacenter_id, datacenter_tenant_id)
    myvim_threads = {}        # vim_thread id cache, same key as myvims
    vimthread_affected = {}   # set (as dict keys) of datacenter_tenant_ids that receive tasks
    net2vm_dependencies = {}  # instance_net_id -> [task_index of VM deletions attached to that net]

    task_index = 0
    instance_action_id = get_task_id()
    db_vim_actions = []
    db_instance_action = {
        "uuid": instance_action_id,   # same uuid for the instance and the action on create
        "tenant_id": tenant_id,
        "instance_id": instance_id,
        "description": "DELETE",
        # "number_tasks": 0 # filled below
    }

    # 2.1 deleting VNFFGs
    # deletion order: SFPs first, then classifications/SFs (depend on SFPs),
    # then SFIs (depend on SFs), then VMs (depend on SFIs), finally nets.
    for sfp in instanceDict.get('sfps', ()):
        vimthread_affected[sfp["datacenter_tenant_id"]] = None
        datacenter_key = (sfp["datacenter_id"], sfp["datacenter_tenant_id"])
        # resolve and cache the VIM connector and vim_thread for this datacenter
        if datacenter_key not in myvims:
            try:
                _, myvim_thread = get_vim_thread(mydb, tenant_id, sfp["datacenter_id"], sfp["datacenter_tenant_id"])
            except NfvoException as e:
                logger.error(str(e))
                myvim_thread = None
            myvim_threads[datacenter_key] = myvim_thread
            vims = get_vim(mydb, tenant_id, datacenter_id=sfp["datacenter_id"],
                           datacenter_tenant_id=sfp["datacenter_tenant_id"])
            if len(vims) == 0:
                logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(sfp["datacenter_id"], sfp["datacenter_tenant_id"]))
                myvims[datacenter_key] = None
            else:
                myvims[datacenter_key] = next(iter(vims.values()))
        myvim = myvims[datacenter_key]
        myvim_thread = myvim_threads[datacenter_key]

        if not myvim:
            error_msg += "\n    vim_sfp_id={} cannot be deleted because datacenter={} not found".format(sfp['vim_sfp_id'], sfp["datacenter_id"])
            continue
        # NOTE(review): "(x)" is not a one-element tuple, so params is the bare
        # id string — confirm vim_thread expects this shape.
        extra = {"params": (sfp['vim_sfp_id'])}
        db_vim_action = {
            "instance_action_id": instance_action_id,
            "task_index": task_index,
            "datacenter_vim_id": sfp["datacenter_tenant_id"],
            "action": "DELETE",
            "status": "SCHEDULED",
            "item": "instance_sfps",
            "item_id": sfp["uuid"],
            "related": sfp["related"],
            "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
        }
        task_index += 1
        db_vim_actions.append(db_vim_action)

    # classifications depend on the SFP deletions scheduled above
    for classification in instanceDict['classifications']:
        vimthread_affected[classification["datacenter_tenant_id"]] = None
        datacenter_key = (classification["datacenter_id"], classification["datacenter_tenant_id"])
        if datacenter_key not in myvims:
            try:
                _, myvim_thread = get_vim_thread(mydb, tenant_id, classification["datacenter_id"], classification["datacenter_tenant_id"])
            except NfvoException as e:
                logger.error(str(e))
                myvim_thread = None
            myvim_threads[datacenter_key] = myvim_thread
            vims = get_vim(mydb, tenant_id, datacenter_id=classification["datacenter_id"],
                           datacenter_tenant_id=classification["datacenter_tenant_id"])
            if len(vims) == 0:
                logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(classification["datacenter_id"],
                                                                                               classification["datacenter_tenant_id"]))
                myvims[datacenter_key] = None
            else:
                myvims[datacenter_key] = next(iter(vims.values()))
        myvim = myvims[datacenter_key]
        myvim_thread = myvim_threads[datacenter_key]

        if not myvim:
            error_msg += "\n    vim_classification_id={} cannot be deleted because datacenter={} not found".format(classification['vim_classification_id'],
                                                                                                                   classification["datacenter_id"])
            continue
        depends_on = [action["task_index"] for action in db_vim_actions if action["item"] == "instance_sfps"]
        extra = {"params": (classification['vim_classification_id']), "depends_on": depends_on}
        db_vim_action = {
            "instance_action_id": instance_action_id,
            "task_index": task_index,
            "datacenter_vim_id": classification["datacenter_tenant_id"],
            "action": "DELETE",
            "status": "SCHEDULED",
            "item": "instance_classifications",
            "item_id": classification["uuid"],
            "related": classification["related"],
            "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
        }
        task_index += 1
        db_vim_actions.append(db_vim_action)

    # SFs also depend on the SFP deletions
    for sf in instanceDict.get('sfs', ()):
        vimthread_affected[sf["datacenter_tenant_id"]] = None
        datacenter_key = (sf["datacenter_id"], sf["datacenter_tenant_id"])
        if datacenter_key not in myvims:
            try:
                _, myvim_thread = get_vim_thread(mydb, tenant_id, sf["datacenter_id"], sf["datacenter_tenant_id"])
            except NfvoException as e:
                logger.error(str(e))
                myvim_thread = None
            myvim_threads[datacenter_key] = myvim_thread
            vims = get_vim(mydb, tenant_id, datacenter_id=sf["datacenter_id"],
                           datacenter_tenant_id=sf["datacenter_tenant_id"])
            if len(vims) == 0:
                logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(sf["datacenter_id"], sf["datacenter_tenant_id"]))
                myvims[datacenter_key] = None
            else:
                myvims[datacenter_key] = next(iter(vims.values()))
        myvim = myvims[datacenter_key]
        myvim_thread = myvim_threads[datacenter_key]

        if not myvim:
            error_msg += "\n    vim_sf_id={} cannot be deleted because datacenter={} not found".format(sf['vim_sf_id'], sf["datacenter_id"])
            continue
        depends_on = [action["task_index"] for action in db_vim_actions if action["item"] == "instance_sfps"]
        extra = {"params": (sf['vim_sf_id']), "depends_on": depends_on}
        db_vim_action = {
            "instance_action_id": instance_action_id,
            "task_index": task_index,
            "datacenter_vim_id": sf["datacenter_tenant_id"],
            "action": "DELETE",
            "status": "SCHEDULED",
            "item": "instance_sfs",
            "item_id": sf["uuid"],
            "related": sf["related"],
            "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
        }
        task_index += 1
        db_vim_actions.append(db_vim_action)

    # SFIs depend on the SF deletions
    for sfi in instanceDict.get('sfis', ()):
        vimthread_affected[sfi["datacenter_tenant_id"]] = None
        datacenter_key = (sfi["datacenter_id"], sfi["datacenter_tenant_id"])
        if datacenter_key not in myvims:
            try:
                _, myvim_thread = get_vim_thread(mydb, tenant_id, sfi["datacenter_id"], sfi["datacenter_tenant_id"])
            except NfvoException as e:
                logger.error(str(e))
                myvim_thread = None
            myvim_threads[datacenter_key] = myvim_thread
            vims = get_vim(mydb, tenant_id, datacenter_id=sfi["datacenter_id"],
                           datacenter_tenant_id=sfi["datacenter_tenant_id"])
            if len(vims) == 0:
                logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(sfi["datacenter_id"], sfi["datacenter_tenant_id"]))
                myvims[datacenter_key] = None
            else:
                myvims[datacenter_key] = next(iter(vims.values()))
        myvim = myvims[datacenter_key]
        myvim_thread = myvim_threads[datacenter_key]

        if not myvim:
            error_msg += "\n    vim_sfi_id={} cannot be deleted because datacenter={} not found".format(sfi['vim_sfi_id'], sfi["datacenter_id"])
            continue
        depends_on = [action["task_index"] for action in db_vim_actions if action["item"] == "instance_sfs"]
        extra = {"params": (sfi['vim_sfi_id']), "depends_on": depends_on}
        db_vim_action = {
            "instance_action_id": instance_action_id,
            "task_index": task_index,
            "datacenter_vim_id": sfi["datacenter_tenant_id"],
            "action": "DELETE",
            "status": "SCHEDULED",
            "item": "instance_sfis",
            "item_id": sfi["uuid"],
            "related": sfi["related"],
            "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
        }
        task_index += 1
        db_vim_actions.append(db_vim_action)

    # 2.2 deleting VMs
    # vm_fail_list=[]
    for sce_vnf in instanceDict.get('vnfs', ()):
        datacenter_key = (sce_vnf["datacenter_id"], sce_vnf["datacenter_tenant_id"])
        vimthread_affected[sce_vnf["datacenter_tenant_id"]] = None
        if datacenter_key not in myvims:
            try:
                _, myvim_thread = get_vim_thread(mydb, tenant_id, sce_vnf["datacenter_id"], sce_vnf["datacenter_tenant_id"])
            except NfvoException as e:
                logger.error(str(e))
                myvim_thread = None
            myvim_threads[datacenter_key] = myvim_thread
            vims = get_vim(mydb, tenant_id, datacenter_id=sce_vnf["datacenter_id"],
                           datacenter_tenant_id=sce_vnf["datacenter_tenant_id"])
            if len(vims) == 0:
                logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(sce_vnf["datacenter_id"],
                                                                                               sce_vnf["datacenter_tenant_id"]))
                myvims[datacenter_key] = None
            else:
                myvims[datacenter_key] = next(iter(vims.values()))
        myvim = myvims[datacenter_key]
        myvim_thread = myvim_threads[datacenter_key]

        for vm in sce_vnf['vms']:
            if not myvim:
                error_msg += "\n    VM id={} cannot be deleted because datacenter={} not found".format(vm['vim_vm_id'], sce_vnf["datacenter_id"])
                continue
            # VM deletion must wait for all SFI deletions scheduled so far
            sfi_dependencies = [action["task_index"] for action in db_vim_actions if action["item"] == "instance_sfis"]
            db_vim_action = {
                "instance_action_id": instance_action_id,
                "task_index": task_index,
                "datacenter_vim_id": sce_vnf["datacenter_tenant_id"],
                "action": "DELETE",
                "status": "SCHEDULED",
                "item": "instance_vms",
                "item_id": vm["uuid"],
                "related": vm["related"],
                "extra": yaml.safe_dump({"params": vm["interfaces"], "depends_on": sfi_dependencies},
                                        default_flow_style=True, width=256)
            }
            db_vim_actions.append(db_vim_action)
            # register this task as a dependency for the nets it is attached to;
            # task_index is incremented only after this bookkeeping
            for interface in vm["interfaces"]:
                if not interface.get("instance_net_id"):
                    continue
                if interface["instance_net_id"] not in net2vm_dependencies:
                    net2vm_dependencies[interface["instance_net_id"]] = []
                net2vm_dependencies[interface["instance_net_id"]].append(task_index)
            task_index += 1

    # 2.3 deleting NETS
    # net_fail_list=[]
    for net in instanceDict['nets']:
        vimthread_affected[net["datacenter_tenant_id"]] = None
        datacenter_key = (net["datacenter_id"], net["datacenter_tenant_id"])
        if datacenter_key not in myvims:
            try:
                _,myvim_thread = get_vim_thread(mydb, tenant_id, net["datacenter_id"], net["datacenter_tenant_id"])
            except NfvoException as e:
                logger.error(str(e))
                myvim_thread = None
            myvim_threads[datacenter_key] = myvim_thread
            vims = get_vim(mydb, tenant_id, datacenter_id=net["datacenter_id"],
                           datacenter_tenant_id=net["datacenter_tenant_id"])
            if len(vims) == 0:
                logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(net["datacenter_id"], net["datacenter_tenant_id"]))
                myvims[datacenter_key] = None
            else:
                myvims[datacenter_key] = next(iter(vims.values()))
        myvim = myvims[datacenter_key]
        myvim_thread = myvim_threads[datacenter_key]

        if not myvim:
            error_msg += "\n    Net VIM_id={} cannot be deleted because datacenter={} not found".format(net['vim_net_id'], net["datacenter_id"])
            continue
        extra = {"params": (net['vim_net_id'], net['sdn_net_id'])}
        # a net must wait for all VMs attached to it, plus any SFI deletions
        if net2vm_dependencies.get(net["uuid"]):
            extra["depends_on"] = net2vm_dependencies[net["uuid"]]
        sfi_dependencies = [action["task_index"] for action in db_vim_actions if action["item"] == "instance_sfis"]
        if len(sfi_dependencies) > 0:
            if "depends_on" in extra:
                extra["depends_on"] += sfi_dependencies
            else:
                extra["depends_on"] = sfi_dependencies
        db_vim_action = {
            "instance_action_id": instance_action_id,
            "task_index": task_index,
            "datacenter_vim_id": net["datacenter_tenant_id"],
            "action": "DELETE",
            "status": "SCHEDULED",
            "item": "instance_nets",
            "item_id": net["uuid"],
            "related": net["related"],
            "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
        }
        task_index += 1
        db_vim_actions.append(db_vim_action)

    db_instance_action["number_tasks"] = task_index

    # --> WIM
    wim_actions, db_instance_action = (
        wim_engine.incorporate_actions(wim_actions, db_instance_action))
    # <-- WIM

    db_tables = [
        {"instance_actions": db_instance_action},
        {"vim_wim_actions": db_vim_actions + wim_actions}
    ]

    logger.debug("delete_instance done DB tables: %s",
                 yaml.safe_dump(db_tables, indent=4, default_flow_style=False))
    # persist the action/tasks, then wake up the affected vim_threads
    mydb.new_rows(db_tables, ())
    for myvim_thread_id in vimthread_affected.keys():
        vim_threads["running"][myvim_thread_id].insert_task(db_vim_actions)

    wim_engine.dispatch(wim_actions)

    if len(error_msg) > 0:
        return 'action_id={} instance {} deleted but some elements could not be deleted, or already deleted '\
               '(error: 404) from VIM: {}'.format(instance_action_id, message, error_msg)
    else:
        return "action_id={} instance {} deleted".format(instance_action_id, message)
+
def get_instance_id(mydb, tenant_id, instance_id):
    """Return the detailed instance dictionary, adding SDN information to the
    nets that have an associated sdn_net_id.

    :param mydb: database connector object
    :param tenant_id: NFVO tenant, validated before the query
    :param instance_id: uuid or name of the instance scenario
    :return: instance dictionary as stored in the database, with an extra
        "sdn_info" entry per SDN-assisted net
    """
    global ovim
    # check valid tenant_id
    check_tenant(mydb, tenant_id)
    # obtain data

    instance_dict = mydb.get_instance_scenario(instance_id, tenant_id, verbose=True)
    sdn_fields = ("admin_state_up", "flows", "last_error", "ports", "type", "status", "vlan")
    for net in instance_dict["nets"]:
        sdn_net_id = net.get("sdn_net_id")
        if not sdn_net_id:
            continue
        net_sdn = ovim.show_network(sdn_net_id)
        net["sdn_info"] = {field: net_sdn.get(field) for field in sdn_fields}
    return instance_dict
+
@deprecated("Instance is automatically refreshed by vim_threads")
def refresh_instance(mydb, nfvo_tenant, instanceDict, datacenter=None, vim_tenant=None):
    """Refresh a scenario instance (deprecated no-op).

    Instance status is now refreshed automatically by the vim_threads workers,
    so this function no longer polls the VIMs. It is kept only for backward
    compatibility of the API; the original polling implementation was removed.

    :param mydb: database connector object (unused)
    :param nfvo_tenant: NFVO tenant (unused)
    :param instanceDict: instance dictionary; only its 'uuid' is read
    :param datacenter: ignored, kept for signature compatibility
    :param vim_tenant: ignored, kept for signature compatibility
    :return: tuple (0, message) — 0 meaning no elements failed to be updated
    """
    instance_id = instanceDict['uuid']
    return 0, 'Scenario instance ' + instance_id + ' refreshed.'
+
def instance_action(mydb, nfvo_tenant, instance_id, action_dict):
    """Execute an action over a deployed scenario instance.

    :param mydb: database connector
    :param nfvo_tenant: NFVO tenant uuid
    :param instance_id: uuid/name of the scenario instance
    :param action_dict: action to perform. "vdu-scaling" is handled asynchronously
        through scheduled vim_wim_actions tasks; "add_public_key" injects an ssh
        key in the selected VMs; anything else (start, pause, console, ...) is
        forwarded to the VIM with action_vminstance()
    :return: for "vdu-scaling", a dict with "instance_action_id", "created" and
        "deleted" vm uuid lists; otherwise a dict indexed by vm uuid with the
        per-VM result. Raises NfvoException on errors
    """
    instanceDict = mydb.get_instance_scenario(instance_id, nfvo_tenant)

    # check that the datacenter exists and get a VIM connector to it
    vims = get_vim(mydb, nfvo_tenant, instanceDict['datacenter_id'])
    if len(vims) == 0:
        raise NfvoException("datacenter '{}' not found".format(str(instanceDict['datacenter_id'])),
                            httperrors.Not_Found)
    myvim = next(iter(vims.values()))
    vm_result = {}
    vm_error = 0
    vm_ok = 0

    myvim_threads_id = {}
    if action_dict.get("vdu-scaling"):
        db_instance_vms = []
        db_vim_actions = []
        db_instance_interfaces = []
        instance_action_id = get_task_id()
        db_instance_action = {
            "uuid": instance_action_id,   # same uuid for the instance and the action on create
            "tenant_id": nfvo_tenant,
            "instance_id": instance_id,
            "description": "SCALE",
        }
        vm_result["instance_action_id"] = instance_action_id
        vm_result["created"] = []
        vm_result["deleted"] = []
        task_index = 0
        for vdu in action_dict["vdu-scaling"]:
            vdu_id = vdu.get("vdu-id")
            osm_vdu_id = vdu.get("osm_vdu_id")
            member_vnf_index = vdu.get("member-vnf-index")
            vdu_count = vdu.get("count", 1)
            if vdu_id:
                target_vms = mydb.get_rows(
                    FROM="instance_vms as vms join instance_vnfs as vnfs on vms.instance_vnf_id=vnfs.uuid",
                    WHERE={"vms.uuid": vdu_id},
                    ORDER_BY="vms.created_at"
                )
                if not target_vms:
                    raise NfvoException("Cannot find the vdu with id {}".format(vdu_id), httperrors.Not_Found)
            else:
                if not osm_vdu_id and not member_vnf_index:
                    # BUGFIX: typo "of" -> "or" in the error message
                    raise NfvoException("Invalid input vdu parameters. Must supply either 'vdu-id' or "
                                        "'osm_vdu_id','member-vnf-index'")
                target_vms = mydb.get_rows(
                    # SELECT=("ivms.uuid", "ivnfs.datacenter_id", "ivnfs.datacenter_tenant_id"),
                    FROM="instance_vms as ivms join instance_vnfs as ivnfs on ivms.instance_vnf_id=ivnfs.uuid"\
                         " join sce_vnfs as svnfs on ivnfs.sce_vnf_id=svnfs.uuid"\
                         " join vms on ivms.vm_id=vms.uuid",
                    WHERE={"vms.osm_id": osm_vdu_id, "svnfs.member_vnf_index": member_vnf_index,
                           "ivnfs.instance_scenario_id": instance_id},
                    ORDER_BY="ivms.created_at"
                )
                if not target_vms:
                    raise NfvoException("Cannot find the vdu with osm_vdu_id {} and member-vnf-index {}".format(
                        osm_vdu_id, member_vnf_index), httperrors.Not_Found)
                vdu_id = target_vms[-1]["uuid"]
            target_vm = target_vms[-1]
            datacenter = target_vm["datacenter_id"]
            myvim_threads_id[datacenter], _ = get_vim_thread(mydb, nfvo_tenant, datacenter)

            if vdu["type"] == "delete":
                # schedule a DELETE task for the newest 'vdu_count' replicas
                for index in range(0, vdu_count):
                    target_vm = target_vms[-1-index]
                    vdu_id = target_vm["uuid"]
                    # look for the vm interfaces to pass them as task params
                    vm_interfaces = None
                    for sce_vnf in instanceDict['vnfs']:
                        for vm in sce_vnf['vms']:
                            if vm["uuid"] == vdu_id:
                                # TODO revise this should not be vm["uuid"]   instance_vms["vm_id"]
                                vm_interfaces = vm["interfaces"]
                                break

                    db_vim_action = {
                        "instance_action_id": instance_action_id,
                        "task_index": task_index,
                        "datacenter_vim_id": target_vm["datacenter_tenant_id"],
                        "action": "DELETE",
                        "status": "SCHEDULED",
                        "item": "instance_vms",
                        "item_id": vdu_id,
                        "related": target_vm["related"],
                        "extra": yaml.safe_dump({"params": vm_interfaces},
                                                default_flow_style=True, width=256)
                    }
                    task_index += 1
                    db_vim_actions.append(db_vim_action)
                    vm_result["deleted"].append(vdu_id)
                    # delete from database
                    db_instance_vms.append({"TO-DELETE": vdu_id})

            else:  # vdu["type"] == "create":
                # clone the original CREATE vim_action, remapping interfaces and dependencies
                iface2iface = {}
                where = {"item": "instance_vms", "item_id": target_vm["uuid"], "action": "CREATE"}

                vim_action_to_clone = mydb.get_rows(FROM="vim_wim_actions", WHERE=where)
                if not vim_action_to_clone:
                    raise NfvoException("Cannot find the vim_action at database with {}".format(where),
                                        httperrors.Internal_Server_Error)
                vim_action_to_clone = vim_action_to_clone[0]
                extra = yaml.safe_load(vim_action_to_clone["extra"])

                # generate a new depends_on. Convert format TASK-Y into new format TASK-ACTION-XXXX.XXXX.Y
                # TODO do the same for flavor and image when available
                task_depends_on = []
                task_params = extra["params"]
                for iface in task_params[5]:
                    if iface["net_id"].startswith("TASK-"):
                        if "." not in iface["net_id"]:
                            task_depends_on.append("{}.{}".format(vim_action_to_clone["instance_action_id"],
                                                                  iface["net_id"][5:]))
                            iface["net_id"] = "TASK-{}.{}".format(vim_action_to_clone["instance_action_id"],
                                                                  iface["net_id"][5:])
                        else:
                            task_depends_on.append(iface["net_id"][5:])
                    if "mac_address" in iface:
                        # the MAC of the original VM cannot be reused by the clones
                        del iface["mac_address"]

                vm_ifaces_to_clone = mydb.get_rows(FROM="instance_interfaces",
                                                   WHERE={"instance_vm_id": target_vm["uuid"]})
                for index in range(0, vdu_count):
                    vm_uuid = str(uuid4())
                    vm_name = target_vm.get('vim_name')
                    try:
                        # rename the clone incrementing the numeric suffix, e.g. xxx-2 -> xxx-3
                        suffix = vm_name.rfind("-")
                        vm_name = vm_name[:suffix+1] + str(index + 1 + int(vm_name[suffix+1:]))
                    except Exception:
                        pass
                    db_instance_vm = {
                        "uuid": vm_uuid,
                        'related': vm_uuid,
                        'instance_vnf_id': target_vm['instance_vnf_id'],
                        'vm_id': target_vm['vm_id'],
                        'vim_name': vm_name,
                    }
                    db_instance_vms.append(db_instance_vm)

                    for vm_iface in vm_ifaces_to_clone:
                        iface_uuid = str(uuid4())
                        iface2iface[vm_iface["uuid"]] = iface_uuid
                        db_vm_iface = {
                            "uuid": iface_uuid,
                            'instance_vm_id': vm_uuid,
                            "instance_net_id": vm_iface["instance_net_id"],
                            'interface_id': vm_iface['interface_id'],
                            'type': vm_iface['type'],
                            'floating_ip': vm_iface['floating_ip'],
                            'port_security': vm_iface['port_security']
                        }
                        db_instance_interfaces.append(db_vm_iface)
                    task_params_copy = deepcopy(task_params)
                    for iface in task_params_copy[5]:
                        iface["uuid"] = iface2iface[iface["uuid"]]
                        # increment ip_address
                        if "ip_address" in iface:
                            ip = iface.get("ip_address")
                            i = ip.rfind(".")
                            if i > 0:
                                try:
                                    i += 1
                                    # BUGFIX: operands were swapped ("ip[i:] + str(int(ip[:i]) + 1)"),
                                    # which always raised and nulled the address; keep the network
                                    # prefix and increment the last octet
                                    ip = ip[:i] + str(int(ip[i:]) + 1)
                                    iface["ip_address"] = ip
                                except Exception:
                                    iface["ip_address"] = None
                    if vm_name:
                        task_params_copy[0] = vm_name
                    db_vim_action = {
                        "instance_action_id": instance_action_id,
                        "task_index": task_index,
                        "datacenter_vim_id": vim_action_to_clone["datacenter_vim_id"],
                        "action": "CREATE",
                        "status": "SCHEDULED",
                        "item": "instance_vms",
                        "item_id": vm_uuid,
                        "related": vm_uuid,
                        "extra": yaml.safe_dump({"params": task_params_copy, "depends_on": task_depends_on},
                                                default_flow_style=True, width=256)
                    }
                    task_index += 1
                    db_vim_actions.append(db_vim_action)
                    vm_result["created"].append(vm_uuid)

        db_instance_action["number_tasks"] = task_index
        db_tables = [
            {"instance_vms": db_instance_vms},
            {"instance_interfaces": db_instance_interfaces},
            {"instance_actions": db_instance_action},
            # TODO revise sfps
            # {"instance_sfis": db_instance_sfis},
            # {"instance_sfs": db_instance_sfs},
            # {"instance_classifications": db_instance_classifications},
            # {"instance_sfps": db_instance_sfps},
            {"vim_wim_actions": db_vim_actions}
        ]
        logger.debug("create_vdu done DB tables: %s",
                     yaml.safe_dump(db_tables, indent=4, default_flow_style=False))
        mydb.new_rows(db_tables, [])
        for myvim_thread in myvim_threads_id.values():
            vim_threads["running"][myvim_thread].insert_task(db_vim_actions)

        return vm_result

    input_vnfs = action_dict.pop("vnfs", [])
    input_vms = action_dict.pop("vms", [])
    action_over_all = True if not input_vnfs and not input_vms else False
    for sce_vnf in instanceDict['vnfs']:
        for vm in sce_vnf['vms']:
            # skip VMs not selected by the vnfs/vms filters
            if not action_over_all and sce_vnf['uuid'] not in input_vnfs and \
                    sce_vnf['vnf_name'] not in input_vnfs and \
                    sce_vnf['member_vnf_index'] not in input_vnfs and \
                    vm['uuid'] not in input_vms and vm['name'] not in input_vms and \
                    sce_vnf['member_vnf_index'] + "-" + vm['vdu_osm_id'] not in input_vms:  # TODO consider vm_count_index
                continue
            try:
                if "add_public_key" in action_dict:
                    if sce_vnf.get('mgmt_access'):
                        # NOTE(review): yaml.load with yaml.Loader on stored data; safe_load preferable — confirm
                        mgmt_access = yaml.load(sce_vnf['mgmt_access'], Loader=yaml.Loader)
                        if not input_vms and mgmt_access.get("vdu-id") != vm['vdu_osm_id']:
                            continue
                        default_user = mgmt_access.get("default-user")
                        password = mgmt_access.get("password")
                        if mgmt_access.get(vm['vdu_osm_id']):
                            default_user = mgmt_access[vm['vdu_osm_id']].get("default-user", default_user)
                            password = mgmt_access[vm['vdu_osm_id']].get("password", password)

                        tenant = mydb.get_rows_by_id('nfvo_tenants', nfvo_tenant)
                        try:
                            if 'ip_address' in vm:
                                mgmt_ip = vm['ip_address'].split(';')
                                # decrypt the RO private key with the tenant uuid and inject the public key
                                priv_RO_key = decrypt_key(tenant[0]['encrypted_RO_priv_key'], tenant[0]['uuid'])
                                myvim.inject_user_key(mgmt_ip[0], action_dict.get('user', default_user),
                                                      action_dict['add_public_key'],
                                                      password=password, ro_key=priv_RO_key)
                                vm_result[vm['uuid']] = {"vim_result": 200,
                                                         "description": "Public key injected",
                                                         "name": vm['name']}
                        except KeyError:
                            raise NfvoException("Unable to inject ssh key in vm: {} - Aborting".format(vm['uuid']),
                                                httperrors.Internal_Server_Error)
                    else:
                        raise NfvoException("Unable to inject ssh key in vm: {} - Aborting".format(vm['uuid']),
                                            httperrors.Internal_Server_Error)
                else:
                    data = myvim.action_vminstance(vm['vim_vm_id'], action_dict)
                    if "console" in action_dict:
                        if not global_config["http_console_proxy"]:
                            # no proxy configured: return the console URL as given by the VIM
                            vm_result[vm['uuid']] = {"vim_result": 200,
                                                     "description": "{protocol}//{ip}:{port}/{suffix}".format(
                                                         protocol=data["protocol"],
                                                         ip=data["server"],
                                                         port=data["port"],
                                                         suffix=data["suffix"]),
                                                     "name": vm['name']}
                            vm_ok += 1
                        elif data["server"] == "127.0.0.1" or data["server"] == "localhost":
                            vm_result[vm['uuid']] = {"vim_result": -httperrors.Unauthorized,
                                                     "description": "this console is only reachable by local interface",
                                                     "name": vm['name']}
                            vm_error += 1
                        else:
                            # expose the console through a local proxy thread
                            try:
                                console_thread = create_or_use_console_proxy_thread(data["server"], data["port"])
                                vm_result[vm['uuid']] = {"vim_result": 200,
                                                         "description": "{protocol}//{ip}:{port}/{suffix}".format(
                                                             protocol=data["protocol"],
                                                             ip=global_config["http_console_host"],
                                                             port=console_thread.port,
                                                             suffix=data["suffix"]),
                                                         "name": vm['name']}
                                vm_ok += 1
                            except NfvoException as e:
                                vm_result[vm['uuid']] = {"vim_result": e.http_code, "name": vm['name'],
                                                         "description": str(e)}
                                vm_error += 1
                    else:
                        vm_result[vm['uuid']] = {"vim_result": 200, "description": "ok", "name": vm['name']}
                        vm_ok += 1
            except vimconn.vimconnException as e:
                vm_result[vm['uuid']] = {"vim_result": e.http_code, "name": vm['name'], "description": str(e)}
                vm_error += 1

    # the original "if vm_ok == 0" had two identical return branches; collapsed into one
    return vm_result
+
def instance_action_get(mydb, nfvo_tenant, instance_id, action_id):
    """Get the list of actions over a scenario instance.

    :param mydb: database connector
    :param nfvo_tenant: tenant uuid used as filter; "any" (or falsy) disables the filter
    :param instance_id: instance uuid used as filter; "any" (or falsy) disables the filter
    :param action_id: if provided, return only this action, including its vim_wim_actions
    :return: {"actions": [...]}. Raises NfvoException if action_id is given but not found
    """
    # BUGFIX(idiom): local variable was called 'filter', shadowing the builtin
    action_filter = {}
    if nfvo_tenant and nfvo_tenant != "any":
        action_filter["tenant_id"] = nfvo_tenant
    if instance_id and instance_id != "any":
        action_filter["instance_id"] = instance_id
    if action_id:
        action_filter["uuid"] = action_id
    rows = mydb.get_rows(FROM="instance_actions", WHERE=action_filter)
    if action_id:
        if not rows:
            raise NfvoException("Not found any action with this criteria", httperrors.Not_Found)
        vim_wim_actions = mydb.get_rows(FROM="vim_wim_actions", WHERE={"instance_action_id": action_id})
        rows[0]["vim_wim_actions"] = vim_wim_actions
        # for backward compatibility set vim_actions = vim_wim_actions
        rows[0]["vim_actions"] = vim_wim_actions
    return {"actions": rows}
+
+
def create_or_use_console_proxy_thread(console_server, console_port):
    """Return a console proxy thread towards server:port, reusing a running one if any.

    Scans the configured console port range for a free local port and starts a new
    ConsoleProxyThread on it, registering it in global_config. Raises NfvoException
    when no free port is found or the proxy cannot be created.
    """
    thread_key = console_server + ":" + str(console_port)
    if thread_key in global_config["console_thread"]:
        # a proxy towards this console endpoint is already running: reuse it
        return global_config["console_thread"][thread_key]

    for candidate_port in global_config["console_port_iterator"]():
        if candidate_port in global_config["console_ports"]:
            # local port already assigned to another proxy
            continue
        try:
            proxy_thread = cli.ConsoleProxyThread(global_config['http_host'], candidate_port,
                                                  console_server, console_port)
            proxy_thread.start()
            global_config["console_thread"][thread_key] = proxy_thread
            global_config["console_ports"][candidate_port] = thread_key
            return proxy_thread
        except cli.ConsoleProxyExceptionPortUsed:
            # port grabbed meanwhile, try the next one
            continue
        except cli.ConsoleProxyException as e:
            raise NfvoException(str(e), httperrors.Bad_Request)
    raise NfvoException("Not found any free 'http_console_ports'", httperrors.Conflict)
+
+
def check_tenant(mydb, tenant_id):
    """Verify that the tenant uuid exists in 'nfvo_tenants'; raise NfvoException otherwise."""
    matching = mydb.get_rows(FROM='nfvo_tenants', SELECT=('uuid',), WHERE={'uuid': tenant_id})
    if not matching:
        raise NfvoException("tenant '{}' not found".format(tenant_id), httperrors.Not_Found)
    return
+
def new_tenant(mydb, tenant_dict):
    """Create a new NFVO tenant, generating its uuid and an RO ssh key pair.

    The public key and the encrypted private key are stored with the tenant row.
    Returns the new tenant uuid; raises NfvoException on database errors.
    """
    tenant_uuid = str(uuid4())
    tenant_dict['uuid'] = tenant_uuid
    try:
        public_key, encrypted_private_key = create_RO_keypair(tenant_uuid)
        tenant_dict['RO_pub_key'] = public_key
        tenant_dict['encrypted_RO_priv_key'] = encrypted_private_key
        mydb.new_row("nfvo_tenants", tenant_dict, confidential_data=True)
    except db_base_Exception as e:
        raise NfvoException("Error creating the new tenant: {} ".format(tenant_dict['name']) + str(e), e.http_code)
    return tenant_uuid
+
def delete_tenant(mydb, tenant):
    """Delete an NFVO tenant by uuid or name; return "<uuid> <name>" of the removed tenant."""
    tenant_record = mydb.get_table_by_uuid_name('nfvo_tenants', tenant, 'tenant')
    mydb.delete_row_by_id("nfvo_tenants", tenant_record['uuid'])
    return tenant_record['uuid'] + " " + tenant_record["name"]
+
+
def new_datacenter(mydb, datacenter_descriptor):
    """Insert a new datacenter row, loading its VIM plugin and applying the optional SDN port mapping.

    The 'sdn-port-mapping' entry of config is handled apart; the remaining config is
    serialized as YAML. On failure applying the port mapping, the datacenter row is
    rolled back. Returns the new datacenter uuid.
    """
    sdn_port_mapping = None
    if "config" in datacenter_descriptor:
        # extract sdn-port-mapping; the rest of config is stored as a YAML string
        sdn_port_mapping = datacenter_descriptor["config"].pop("sdn-port-mapping", None)
        datacenter_descriptor["config"] = yaml.safe_dump(datacenter_descriptor["config"], default_flow_style=True,
                                                         width=256)
    # check that datacenter-type is correct and its plugin is loaded
    datacenter_type = datacenter_descriptor.get("type", "openvim")
    plugin_name = "rovim_" + datacenter_type
    if plugin_name not in plugins:
        _load_vim_plugin(plugin_name)

    datacenter_id = mydb.new_row("datacenters", datacenter_descriptor, add_uuid=True, confidential_data=True)
    if sdn_port_mapping:
        try:
            datacenter_sdn_port_mapping_set(mydb, None, datacenter_id, sdn_port_mapping)
        except Exception as e:
            mydb.delete_row_by_id("datacenters", datacenter_id)   # Rollback
            raise e
    return datacenter_id
+
+
def edit_datacenter(mydb, datacenter_id_name, datacenter_descriptor):
    """Update a datacenter entry, merging the new 'config' with the stored one.

    A None value inside config removes that key; 'sdn-port-mapping' is handled
    apart (removed and/or re-created). A 'config' key set to None clears the
    whole stored config.
    :return: the datacenter uuid. Raises NfvoException on bad config format or
        SDN port-mapping errors
    """
    # obtain data, check that only one exists
    datacenter = mydb.get_table_by_uuid_name('datacenters', datacenter_id_name)

    # edit data
    datacenter_id = datacenter['uuid']
    where = {'uuid': datacenter['uuid']}
    remove_port_mapping = False
    new_sdn_port_mapping = None
    if "config" in datacenter_descriptor:
        # BUGFIX: config_dict was unbound (NameError at "if config_dict:") when the
        # provided config was None; initialize it so config=None clears the config
        config_dict = None
        if datacenter_descriptor['config'] is not None:
            try:
                new_config_dict = datacenter_descriptor["config"]
                if "sdn-port-mapping" in new_config_dict:
                    remove_port_mapping = True
                    new_sdn_port_mapping = new_config_dict.pop("sdn-port-mapping")
                # delete null fields
                to_delete = []
                for k in new_config_dict:
                    if new_config_dict[k] is None:
                        to_delete.append(k)
                        if k == 'sdn-controller':
                            remove_port_mapping = True

                config_text = datacenter.get("config")
                if not config_text:
                    config_text = '{}'
                config_dict = yaml.load(config_text, Loader=yaml.Loader)
                config_dict.update(new_config_dict)
                # delete null fields
                for k in to_delete:
                    del config_dict[k]
            except Exception as e:
                raise NfvoException("Bad format at datacenter:config " + str(e), httperrors.Bad_Request)
        if config_dict:
            datacenter_descriptor["config"] = yaml.safe_dump(config_dict, default_flow_style=True, width=256)
        else:
            datacenter_descriptor["config"] = None
        if remove_port_mapping:
            try:
                datacenter_sdn_port_mapping_delete(mydb, None, datacenter_id)
            except ovimException as e:
                raise NfvoException("Error deleting datacenter-port-mapping " + str(e), httperrors.Conflict)

    mydb.update_rows('datacenters', datacenter_descriptor, where)
    if new_sdn_port_mapping:
        try:
            datacenter_sdn_port_mapping_set(mydb, None, datacenter_id, new_sdn_port_mapping)
        except ovimException as e:
            # Rollback the datacenter row to its previous content
            mydb.update_rows('datacenters', datacenter, where)
            raise NfvoException("Error adding datacenter-port-mapping " + str(e), httperrors.Conflict)
    return datacenter_id
+
+
def delete_datacenter(mydb, datacenter):
    """Delete a datacenter (by uuid or name) and its SDN port mapping.

    Returns "<uuid> <name>" of the removed datacenter; raises NfvoException if the
    port-mapping deletion fails.
    """
    datacenter_record = mydb.get_table_by_uuid_name('datacenters', datacenter, 'datacenter')
    mydb.delete_row_by_id("datacenters", datacenter_record['uuid'])
    try:
        datacenter_sdn_port_mapping_delete(mydb, None, datacenter_record['uuid'])
    except ovimException as e:
        raise NfvoException("Error deleting datacenter-port-mapping " + str(e))
    return datacenter_record['uuid'] + " " + datacenter_record['name']
+
+
def create_vim_account(mydb, nfvo_tenant, datacenter_id, name=None, vim_id=None, vim_tenant=None, vim_tenant_name=None,
                       vim_username=None, vim_password=None, config=None):
    """Attach a datacenter (VIM) to an NFVO tenant, creating a vim_account.

    When neither vim_tenant nor vim_tenant_name is supplied, a tenant is created
    at the VIM. Fills the datacenter_tenants and tenants_datacenters tables and
    starts a vim_thread for the new account.
    :return: the created vim_account (thread) uuid. Raises NfvoException on errors
    """
    # get datacenter info
    try:
        if not datacenter_id:
            if not vim_id:
                # BUGFIX: unterminated quote in the error message
                raise NfvoException("You must provide 'vim_id'", http_code=httperrors.Bad_Request)
            datacenter_id = vim_id
        datacenter_id, datacenter_name = get_datacenter_uuid(mydb, None, datacenter_id)

        create_vim_tenant = True if not vim_tenant and not vim_tenant_name else False

        # get nfvo_tenant info
        tenant_dict = mydb.get_table_by_uuid_name('nfvo_tenants', nfvo_tenant)
        if vim_tenant_name is None:
            vim_tenant_name = tenant_dict['name']

        tenants_datacenter_dict = {"nfvo_tenant_id": tenant_dict['uuid'], "datacenter_id": datacenter_id}

        vim_tenant_id_exist_atdb = False
        if not create_vim_tenant:
            where_ = {"datacenter_id": datacenter_id}
            if vim_tenant is not None:
                where_["vim_tenant_id"] = vim_tenant
            if vim_tenant_name is not None:
                where_["vim_tenant_name"] = vim_tenant_name
            # check if vim_tenant_id is already at database
            datacenter_tenants_dict = mydb.get_rows(FROM='datacenter_tenants', WHERE=where_)
            if len(datacenter_tenants_dict) >= 1:
                datacenter_tenants_dict = datacenter_tenants_dict[0]
                vim_tenant_id_exist_atdb = True
                # TODO check if a field has changed and edit entry at datacenter_tenants at DB
            else:  # result=0
                datacenter_tenants_dict = {}
                # insert at table datacenter_tenants
        else:  # if vim_tenant==None:
            # create tenant at VIM if not provided
            try:
                # BUGFIX: 'datacenter' was undefined here (NameError); use datacenter_id
                _, myvim = get_datacenter_by_name_uuid(mydb, None, datacenter_id, vim_user=vim_username,
                                                       vim_passwd=vim_password)
                datacenter_name = myvim["name"]
                vim_tenant = myvim.new_tenant(vim_tenant_name, "created by openmano for datacenter "+datacenter_name)
            except vimconn.vimconnException as e:
                # BUGFIX: 'vim_tenant_id' was undefined in the message; report vim_tenant_name
                raise NfvoException("Not possible to create vim_tenant {} at VIM: {}".format(vim_tenant_name, str(e)),
                                    httperrors.Internal_Server_Error)
            datacenter_tenants_dict = {}
            datacenter_tenants_dict["created"] = "true"

        # fill datacenter_tenants table
        if not vim_tenant_id_exist_atdb:
            datacenter_tenants_dict["vim_tenant_id"] = vim_tenant
            datacenter_tenants_dict["vim_tenant_name"] = vim_tenant_name
            datacenter_tenants_dict["user"] = vim_username
            datacenter_tenants_dict["passwd"] = vim_password
            datacenter_tenants_dict["datacenter_id"] = datacenter_id
            if name:
                datacenter_tenants_dict["name"] = name
            else:
                datacenter_tenants_dict["name"] = datacenter_name
            if config:
                datacenter_tenants_dict["config"] = yaml.safe_dump(config, default_flow_style=True, width=256)
            id_ = mydb.new_row('datacenter_tenants', datacenter_tenants_dict, add_uuid=True, confidential_data=True)
            datacenter_tenants_dict["uuid"] = id_

        # fill tenants_datacenters table
        datacenter_tenant_id = datacenter_tenants_dict["uuid"]
        tenants_datacenter_dict["datacenter_tenant_id"] = datacenter_tenant_id
        mydb.new_row('tenants_datacenters', tenants_datacenter_dict)

        # create the processing thread for this vim_account
        thread_name = get_non_used_vim_name(datacenter_name, datacenter_id, tenant_dict['name'], tenant_dict['uuid'])
        new_thread = vim_thread(task_lock, plugins, thread_name, datacenter_name, datacenter_tenant_id,
                                db=db, db_lock=db_lock, ovim=ovim)
        new_thread.start()
        thread_id = datacenter_tenants_dict["uuid"]
        vim_threads["running"][thread_id] = new_thread
        return thread_id
    except vimconn.vimconnException as e:
        raise NfvoException(str(e), httperrors.Bad_Request)
+
+
def edit_vim_account(mydb, nfvo_tenant, datacenter_tenant_id, datacenter_id=None, name=None, vim_tenant=None,
                              vim_tenant_name=None, vim_username=None, vim_password=None, config=None):
    """Edit a vim_account of a tenant (credentials, tenant, name and/or config).

    The provided config is merged into the stored one. After the update the
    vim_thread is told to reload its credentials.
    :return: the datacenter_tenant uuid. Raises NfvoException if the vim_account
        is not found or is not unique for this tenant
    """
    # get vim_account; check it is valid for this tenant
    from_ = "datacenter_tenants as dt JOIN tenants_datacenters as td ON dt.uuid=td.datacenter_tenant_id"
    where_ = {"td.nfvo_tenant_id": nfvo_tenant}
    if datacenter_tenant_id:
        where_["dt.uuid"] = datacenter_tenant_id
    if datacenter_id:
        where_["dt.datacenter_id"] = datacenter_id
    vim_accounts = mydb.get_rows(SELECT="dt.uuid as uuid, config", FROM=from_, WHERE=where_)
    if not vim_accounts:
        raise NfvoException("vim_account not found for this tenant", http_code=httperrors.Not_Found)
    elif len(vim_accounts) > 1:
        raise NfvoException("found more than one vim_account for this tenant", http_code=httperrors.Conflict)
    datacenter_tenant_id = vim_accounts[0]["uuid"]
    original_config = vim_accounts[0]["config"]

    update_ = {}
    if config:
        original_config_dict = yaml.load(original_config, Loader=yaml.Loader)
        original_config_dict.update(config)
        # BUGFIX: was 'update["config"]', referencing an undefined name (NameError)
        update_["config"] = yaml.safe_dump(original_config_dict, default_flow_style=True, width=256)
    if name:
        update_['name'] = name
    if vim_tenant:
        update_['vim_tenant_id'] = vim_tenant
    if vim_tenant_name:
        update_['vim_tenant_name'] = vim_tenant_name
    if vim_username:
        update_['user'] = vim_username
    if vim_password:
        update_['passwd'] = vim_password
    if update_:
        mydb.update_rows("datacenter_tenants", UPDATE=update_, WHERE={"uuid": datacenter_tenant_id})

    # make the running thread reload the changed credentials/config
    vim_threads["running"][datacenter_tenant_id].insert_task("reload")
    return datacenter_tenant_id
+
def delete_vim_account(mydb, tenant_id, vim_account_id, datacenter=None):
    """Detach a datacenter/vim_account from a tenant.

    Deletes the tenants_datacenters association and the datacenter_tenants rows,
    removing the tenant at the VIM when it was created by the NFVO, and asks the
    corresponding vim_thread to exit.
    :param tenant_id: tenant uuid/name, or "any"/falsy for all tenants
    :param vim_account_id: datacenter_tenant uuid, used when no datacenter is given
    :param datacenter: optional datacenter uuid/name
    :return: a "datacenter ... detached" message, possibly with a warning appended
    """
    # get nfvo_tenant info
    if not tenant_id or tenant_id == "any":
        tenant_uuid = None
    else:
        tenant_dict = mydb.get_table_by_uuid_name('nfvo_tenants', tenant_id)
        tenant_uuid = tenant_dict['uuid']

    # BUGFIX: initialize so the final message does not raise NameError when the
    # deletion is requested by vim_account_id only (no 'datacenter' parameter)
    datacenter_id = None
    # check that this association exists before
    tenants_datacenter_dict = {}
    if datacenter:
        datacenter_id, _ = get_datacenter_uuid(mydb, tenant_uuid, datacenter)
        tenants_datacenter_dict["datacenter_id"] = datacenter_id
    elif vim_account_id:
        tenants_datacenter_dict["datacenter_tenant_id"] = vim_account_id
    if tenant_uuid:
        tenants_datacenter_dict["nfvo_tenant_id"] = tenant_uuid
    tenant_datacenter_list = mydb.get_rows(FROM='tenants_datacenters', WHERE=tenants_datacenter_dict)
    if len(tenant_datacenter_list) == 0 and tenant_uuid:
        raise NfvoException("datacenter '{}' and tenant '{}' are not attached".format(
            datacenter_id, tenant_dict['uuid']), httperrors.Not_Found)

    # delete this association
    mydb.delete_row(FROM='tenants_datacenters', WHERE=tenants_datacenter_dict)

    # get vim_tenant info and delete
    warning = ''
    for tenant_datacenter_item in tenant_datacenter_list:
        vim_tenant_dict = mydb.get_table_by_uuid_name('datacenter_tenants',
                                                      tenant_datacenter_item['datacenter_tenant_id'])
        # try to delete vim:tenant
        try:
            mydb.delete_row_by_id('datacenter_tenants', tenant_datacenter_item['datacenter_tenant_id'])
            if vim_tenant_dict['created'] == 'true':
                # delete tenant at VIM if created by NFVO
                try:
                    datacenter_id, myvim = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)
                    myvim.delete_tenant(vim_tenant_dict['vim_tenant_id'])
                except vimconn.vimconnException as e:
                    warning = "Not possible to delete vim_tenant_id {} from VIM: {} ".format(
                        vim_tenant_dict['vim_tenant_id'], str(e))
                    logger.warn(warning)
        except db_base_Exception as e:
            # the error will be caused because dependencies, vim_tenant can not be deleted
            logger.error("Cannot delete datacenter_tenants " + str(e))
        # stop the processing thread of this vim_account
        thread_id = tenant_datacenter_item["datacenter_tenant_id"]
        thread = vim_threads["running"].get(thread_id)
        if thread:
            thread.insert_task("exit")
            vim_threads["deleting"][thread_id] = thread
    return "datacenter {} detached. {}".format(datacenter_id, warning)
+
+
def datacenter_action(mydb, tenant_id, datacenter, action_dict):
    """DEPRECATED. Perform an administrative action over a datacenter.

    Supported actions (keys of action_dict):
      check-connectivity: test the VIM connection
      net-update: refresh datacenter_nets from the VIM shared/active networks
      net-edit: update one datacenter_nets entry
      net-delete: remove one datacenter_nets entry

    :raises NfvoException: on VIM errors or unknown action
    """
    # get datacenter info
    datacenter_id, myvim = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)

    if 'check-connectivity' in action_dict:
        try:
            myvim.check_vim_connectivity()
        except vimconn.vimconnException as e:
            raise NfvoException(str(e), e.http_code)
    elif 'net-update' in action_dict:
        try:
            nets = myvim.get_network_list(filter_dict={'shared': True, 'admin_state_up': True, 'status': 'ACTIVE'})
        except vimconn.vimconnException as e:
            raise NfvoException(str(e), httperrors.Internal_Server_Error)
        # update nets: change from VIM format to NFVO format
        net_list = []
        for net in nets:
            net_nfvo = {'datacenter_id': datacenter_id}
            net_nfvo['name'] = net['name']
            net_nfvo['vim_net_id'] = net['id']
            # change from ('ptp','data','bridge_data','bridge_man') to ('bridge','data','ptp')
            net_nfvo['type'] = net['type'][0:6]
            net_nfvo['shared'] = net['shared']
            net_nfvo['multipoint'] = False if net['type'] == 'ptp' else True
            net_list.append(net_nfvo)
        inserted, deleted = mydb.update_datacenter_nets(datacenter_id, net_list)
        logger.info("Inserted %d nets, deleted %d old nets", inserted, deleted)
        return inserted
    elif 'net-edit' in action_dict:
        net = action_dict['net-edit'].pop('net')
        what = 'vim_net_id' if utils.check_valid_uuid(net) else 'name'
        result = mydb.update_rows('datacenter_nets', action_dict['net-edit'],
                                  WHERE={'datacenter_id': datacenter_id, what: net})
        return result
    elif 'net-delete' in action_dict:
        # fix: the key was misspelled 'net-deelte', raising KeyError on every net-delete
        net = action_dict['net-delete'].get('net')
        what = 'vim_net_id' if utils.check_valid_uuid(net) else 'name'
        result = mydb.delete_row(FROM='datacenter_nets',
                                 WHERE={'datacenter_id': datacenter_id, what: net})
        return result
    else:
        raise NfvoException("Unknown action " + str(action_dict), httperrors.Bad_Request)
+
+
def datacenter_edit_netmap(mydb, tenant_id, datacenter, netmap, action_dict):
    """Update one datacenter_nets (netmap) entry of a datacenter."""
    # resolve the datacenter uuid from its uuid/name
    datacenter_id, _ = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)

    # the netmap may be referenced either by uuid or by name
    key = 'uuid' if utils.check_valid_uuid(netmap) else 'name'
    return mydb.update_rows('datacenter_nets', action_dict['netmap'],
                            WHERE={'datacenter_id': datacenter_id, key: netmap})
+
+
def datacenter_new_netmap(mydb, tenant_id, datacenter, action_dict=None):
    """Create datacenter_nets (netmap) entries from networks present at the VIM.

    Without action_dict all shared VIM networks are imported; with it, the
    network is selected by vim_id/vim_name and optionally renamed.
    """
    # resolve the datacenter and obtain a VIM connector
    datacenter_id, myvim = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)

    filter_dict = {}
    if action_dict:
        action_dict = action_dict["netmap"]
        if 'vim_id' in action_dict:
            filter_dict["id"] = action_dict['vim_id']
        if 'vim_name' in action_dict:
            filter_dict["name"] = action_dict['vim_name']
    else:
        # import every shared network by default
        filter_dict["shared"] = True

    try:
        vim_nets = myvim.get_network_list(filter_dict=filter_dict)
    except vimconn.vimconnException as e:
        raise NfvoException(str(e), httperrors.Internal_Server_Error)
    if len(vim_nets) > 1 and action_dict:
        raise NfvoException("more than two networks found, specify with vim_id", httperrors.Conflict)
    elif len(vim_nets) == 0:  # and action_dict:
        raise NfvoException("Not found a network at VIM with " + str(filter_dict), httperrors.Not_Found)

    net_list = []
    for vim_net in vim_nets:
        entry = {'datacenter_id': datacenter_id}
        if action_dict and "name" in action_dict:
            entry['name'] = action_dict['name']
        else:
            entry['name'] = vim_net['name']
        entry['vim_net_id'] = vim_net['id']
        # map VIM types ('ptp','data','bridge_data','bridge_man') to ('bridge','data','ptp')
        entry['type'] = vim_net['type'][0:6]
        entry['shared'] = vim_net['shared']
        entry['multipoint'] = vim_net['type'] != 'ptp'
        try:
            new_uuid = mydb.new_row("datacenter_nets", entry, add_uuid=True)
            entry["status"] = "OK"
            entry["uuid"] = new_uuid
        except db_base_Exception as e:
            if action_dict:
                raise
            entry["status"] = "FAIL: " + str(e)
        net_list.append(entry)
    return net_list
+
def get_sdn_net_id(mydb, tenant_id, datacenter, network_id):
    """Return the SDN network uuid mapped to a VIM dataplane network, or None.

    network_id can be a VIM network uuid or name. Returns None when the network
    is not of 'data' type or when no SDN network is associated to it.
    """
    # obtain the network data from the VIM
    try:
        if utils.check_valid_uuid(network_id):
            filter_dict = {"id": network_id}
        else:
            filter_dict = {"name": network_id}

        datacenter_id, myvim = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)
        network = myvim.get_network_list(filter_dict=filter_dict)
    except vimconn.vimconnException as e:
        raise NfvoException("Not possible to get_sdn_net_id from VIM: {}".format(str(e)), e.http_code)

    # the network must exist and be unique
    if len(network) == 0:
        raise NfvoException("Network {} is not present in the system".format(network_id),
                            httperrors.Bad_Request)
    if len(network) > 1:
        raise NfvoException("Multiple networks present in vim identified by {}".format(network_id),
                            httperrors.Bad_Request)

    # only dataplane networks have an SDN counterpart
    if network[0]['type'] != 'data':
        return None

    # from here on, work with the uuid
    network_id = network[0]['id']

    # look up in instance_nets the sdn_net_id mapped to this vim_net_id
    search_dict = {'vim_net_id': network_id}
    try:
        result = mydb.get_rows(SELECT=('sdn_net_id',), FROM='instance_nets', WHERE=search_dict)
    except db_base_Exception as e:
        raise NfvoException("db_base_Exception obtaining SDN network to associated to vim network {}".format(
            network_id) + str(e), e.http_code)

    sdn_net_id = None
    matches = 0
    for row in result:
        if row['sdn_net_id'] is not None:
            matches += 1
            sdn_net_id = row['sdn_net_id']

    if matches == 0:
        return None
    if matches == 1:
        return sdn_net_id
    raise NfvoException("More than one SDN network is associated to vim network {}".format(
        network_id), httperrors.Internal_Server_Error)
+
def get_sdn_controller_id(mydb, datacenter):
    """Return the SDN controller uuid configured for a datacenter, or None."""
    # the controller id is stored inside the datacenter 'config' YAML text
    config_text = mydb.get_rows(SELECT=('config',), FROM='datacenters',
                                WHERE={'uuid': datacenter})[0].get('config', '{}')
    if not config_text:
        return None
    config = yaml.load(config_text, Loader=yaml.Loader)
    return config.get('sdn-controller')
+
def vim_net_sdn_attach(mydb, tenant_id, datacenter, network_id, descriptor):
    """Attach an external switch port to the SDN network associated to a VIM network.

    descriptor must contain 'port' and may contain 'vlan' and 'mac'.
    Returns a text with the uuid of the created port.
    """
    try:
        sdn_network_id = get_sdn_net_id(mydb, tenant_id, datacenter, network_id)
        if not sdn_network_id:
            raise NfvoException("No SDN network is associated to vim-network {}".format(network_id),
                                httperrors.Internal_Server_Error)

        # obtain the SDN controller configured for the datacenter
        controller_id = get_sdn_controller_id(mydb, datacenter)
        if not controller_id:
            raise NfvoException("No SDN controller is set for datacenter {}".format(datacenter),
                                httperrors.Internal_Server_Error)

        # obtain the controller details (we need its dpid)
        sdn_controller = ovim.show_of_controller(controller_id)

        port_data = {
            'name': 'external_port',
            'net_id': sdn_network_id,
            'ofc_id': controller_id,
            'switch_dpid': sdn_controller['dpid'],
            'switch_port': descriptor['port'],
        }
        # optional attributes
        for key in ('vlan', 'mac'):
            if key in descriptor:
                port_data[key] = descriptor[key]

        result = ovim.new_port(port_data)
    except ovimException as e:
        raise NfvoException("ovimException attaching SDN network {} to vim network {}".format(
            sdn_network_id, network_id) + str(e), httperrors.Internal_Server_Error)
    except db_base_Exception as e:
        raise NfvoException("db_base_Exception attaching SDN network to vim network {}".format(
            network_id) + str(e), e.http_code)

    return 'Port uuid: ' + result
+
def vim_net_sdn_detach(mydb, tenant_id, datacenter, network_id, port_id=None):
    """Detach port(s) from the SDN network associated to a VIM network.

    With port_id only that port is removed; otherwise every port named
    'external_port' of the associated SDN network is removed.
    """
    if port_id:
        port_filter = {'uuid': port_id}
    else:
        sdn_network_id = get_sdn_net_id(mydb, tenant_id, datacenter, network_id)
        if not sdn_network_id:
            raise NfvoException("No SDN network is associated to vim-network {}".format(network_id),
                                httperrors.Internal_Server_Error)
        # in case no port_id is specified only ports marked as 'external_port' will be detached
        port_filter = {'name': 'external_port', 'net_id': sdn_network_id}

    try:
        port_list = ovim.get_ports(columns={'uuid'}, filter=port_filter)
    except ovimException as e:
        raise NfvoException("ovimException obtaining external ports for net {}. ".format(network_id) + str(e),
                            httperrors.Internal_Server_Error)

    if len(port_list) == 0:
        raise NfvoException(
            "No ports attached to the network {} were found with the requested criteria".format(network_id),
            httperrors.Bad_Request)

    detached = []
    for port in port_list:
        try:
            detached.append(port['uuid'])
            ovim.delete_port(port['uuid'])
        except ovimException as e:
            raise NfvoException("ovimException deleting port {} for net {}. ".format(
                port['uuid'], network_id) + str(e), httperrors.Internal_Server_Error)

    return 'Detached ports uuid: {}'.format(','.join(detached))
+
def vim_action_get(mydb, tenant_id, datacenter, item, name):
    """Get VIM items (networks, tenants or images) from a datacenter.

    :param item: one of "networks", "tenants", "images"
    :param name: optional uuid or name to filter by
    :return: {"<item>": [...]}, or {"<singular item>": {...}} when name matches exactly one
    :raises NfvoException: on VIM errors, unknown item, or nothing found for a name
    """
    # get datacenter info
    datacenter_id, myvim = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)
    filter_dict = {}
    if name:
        if utils.check_valid_uuid(name):
            filter_dict["id"] = name
        else:
            filter_dict["name"] = name
    try:
        if item == "networks":
            content = myvim.get_network_list(filter_dict=filter_dict)
            if len(content) == 0:
                raise NfvoException("Network {} is not present in the system. ".format(name),
                                    httperrors.Bad_Request)

            # update the networks with the attached SDN ports
            for net in content:
                sdn_network_id = get_sdn_net_id(mydb, tenant_id, datacenter, net['id'])
                if sdn_network_id is not None:
                    try:
                        port_list = ovim.get_ports(columns={'uuid', 'switch_port', 'vlan', 'name'},
                                                   filter={'net_id': sdn_network_id})
                    except ovimException as e:
                        # fix: was 'network_id', an undefined name in this function (NameError)
                        raise NfvoException("ovimException obtaining external ports for net {}. ".format(
                            net['id']) + str(e), httperrors.Internal_Server_Error)
                    # remove field name and if port name is external_port save it as 'type'
                    for port in port_list:
                        if port['name'] == 'external_port':
                            port['type'] = "External"
                        del port['name']
                    net['sdn_network_id'] = sdn_network_id
                    net['sdn_attached_ports'] = port_list

        elif item == "tenants":
            content = myvim.get_tenant_list(filter_dict=filter_dict)
        elif item == "images":
            content = myvim.get_image_list(filter_dict=filter_dict)
        else:
            raise NfvoException(item + "?", httperrors.Method_Not_Allowed)
        logger.debug("vim_action response %s", content)  # update nets Change from VIM format to NFVO format
        if name and len(content) == 1:
            return {item[:-1]: content[0]}
        elif name and len(content) == 0:
            # fix: the second NfvoException argument must be an HTTP error code,
            # not the datacenter identifier
            raise NfvoException("No {} found with ".format(item[:-1]) + " and ".join(
                map(lambda x: str(x[0]) + ": " + str(x[1]), filter_dict.items())),
                httperrors.Not_Found)
        else:
            return {item: content}
    except vimconn.vimconnException as e:
        # fix: log through the module logger instead of print
        logger.error("vim_action Not possible to get_%s_list from VIM: %s", item, str(e))
        raise NfvoException("Not possible to get_{}_list from VIM: {}".format(item, str(e)), e.http_code)
+
+
def vim_action_delete(mydb, tenant_id, datacenter, item, name):
    """Delete a VIM item (network, tenant or image) from a datacenter.

    For networks, any associated SDN network is cleaned up first: all its
    ports are detached one by one, the VIM/SDN correspondence row is removed
    from 'instance_nets', and the SDN network itself is deleted, before the
    VIM network is finally removed.

    :param item: one of "networks", "tenants", "images"
    :param name: uuid or name of the item to delete
    :return: informative text with the deleted item id and name
    :raises NfvoException: if not found, ambiguous, or a VIM/SDN/DB step fails
    """
    #get datacenter info
    if tenant_id == "any":
        tenant_id=None

    datacenter_id, myvim  = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)
    # resolve uuid/name via vim_action_get; it returns a dict keyed by item
    content = vim_action_get(mydb, tenant_id, datacenter, item, name)
    logger.debug("vim_action_delete vim response: " + str(content))
    items = next(iter(content.values()))
    # a list means the name matched zero or several items; a dict means exactly one
    if type(items)==list and len(items)==0:
        raise NfvoException("Not found " + item, httperrors.Not_Found)
    elif type(items)==list and len(items)>1:
        raise NfvoException("Found more than one {} with this name. Use uuid.".format(item), httperrors.Not_Found)
    else: # it is a dict
        item_id = items["id"]
        item_name = str(items.get("name"))

    try:
        if item=="networks":
            # If there is a SDN network associated to the vim-network, proceed to clear the relationship and delete it
            sdn_network_id = get_sdn_net_id(mydb, tenant_id, datacenter, item_id)
            if sdn_network_id != None:
                # Delete any port attachment to this network
                try:
                    port_list = ovim.get_ports(columns={'uuid'}, filter={'net_id': sdn_network_id})
                except ovimException as e:
                    raise NfvoException(
                        "ovimException obtaining external ports for net {}. ".format(sdn_network_id) + str(e),
                        httperrors.Internal_Server_Error)

                # By calling one by one all ports to be detached we ensure that not only the external_ports get detached
                for port in port_list:
                    vim_net_sdn_detach(mydb, tenant_id, datacenter, item_id, port['uuid'])

                # Delete from 'instance_nets' the correspondence between the vim-net-id and the sdn-net-id
                # (rows with instance_scenario_id=None hold this mapping, not real instances)
                try:
                    mydb.delete_row(FROM='instance_nets', WHERE={'instance_scenario_id': None,
                                                                 'sdn_net_id': sdn_network_id,
                                                                 'vim_net_id': item_id})
                except db_base_Exception as e:
                    raise NfvoException("Error deleting correspondence for VIM/SDN dataplane networks{}: {}".format(
                        item_id, e), e.http_code)

                # Delete the SDN network
                try:
                    ovim.delete_network(sdn_network_id)
                except ovimException as e:
                    logger.error("ovimException deleting SDN network={} ".format(sdn_network_id) + str(e), exc_info=True)
                    raise NfvoException("ovimException deleting SDN network={} ".format(sdn_network_id) + str(e),
                                        httperrors.Internal_Server_Error)

            content = myvim.delete_network(item_id)
        elif item=="tenants":
            content = myvim.delete_tenant(item_id)
        elif item == "images":
            content = myvim.delete_image(item_id)
        else:
            raise NfvoException(item + "?", httperrors.Method_Not_Allowed)
    except vimconn.vimconnException as e:
        #logger.error( "vim_action Not possible to delete_{} {}from VIM: {} ".format(item, name, str(e)))
        raise NfvoException("Not possible to delete_{} {} from VIM: {}".format(item, name, str(e)), e.http_code)

    return "{} {} {} deleted".format(item[:-1], item_id,item_name)
+
+
def vim_action_create(mydb, tenant_id, datacenter, item, descriptor):
    """Create a VIM item (network or tenant) at a datacenter.

    For dataplane ('data'/'ptp') networks, when the datacenter has an SDN
    controller configured, a twin SDN network is created through ovim and
    the vim_net_id <-> sdn_net_id correspondence is stored in the
    'instance_nets' table (with instance_scenario_id=None to distinguish it
    from real network instances).

    :param item: one of "networks", "tenants"
    :param descriptor: dict with the item description under key "network"/"tenant"
    :return: the created item, as returned by vim_action_get()
    :raises NfvoException: on VIM, SDN or database errors
    """
    #get datacenter info
    logger.debug("vim_action_create descriptor %s", str(descriptor))
    if tenant_id == "any":
        tenant_id=None
    datacenter_id, myvim  = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)
    try:
        if item=="networks":
            net = descriptor["network"]
            net_name = net.pop("name")
            net_type = net.pop("type", "bridge")
            net_public = net.pop("shared", False)
            net_ipprofile = net.pop("ip_profile", None)
            net_vlan = net.pop("vlan", None)
            content, _ = myvim.new_network(net_name, net_type, net_ipprofile, shared=net_public, vlan=net_vlan) #, **net)

            # If the datacenter has a SDN controller defined and the network is of dataplane type, then create the sdn network
            if get_sdn_controller_id(mydb, datacenter) != None and (net_type == 'data' or net_type == 'ptp'):
                # obtain datacenter_tenant_id (used as the ovim 'region')
                datacenter_tenant_id = mydb.get_rows(SELECT=('uuid',),
                                                     FROM='datacenter_tenants',
                                                     WHERE={'datacenter_id': datacenter})[0]['uuid']
                try:
                    sdn_network = {}
                    sdn_network['vlan'] = net_vlan
                    sdn_network['type'] = net_type
                    sdn_network['name'] = net_name
                    sdn_network['region'] = datacenter_tenant_id
                    ovim_content  = ovim.new_network(sdn_network)
                except ovimException as e:
                    logger.error("ovimException creating SDN network={} ".format(
                        sdn_network) + str(e), exc_info=True)
                    raise NfvoException("ovimException creating SDN network={} ".format(sdn_network) + str(e),
                                        httperrors.Internal_Server_Error)

                # Save entry in database mano_db in table instance_nets to establish a mapping vim_net_id <-> sdn_net_id
                # use instance_scenario_id=None to distinguish from real instances of nets
                correspondence = {'instance_scenario_id': None,
                                  'sdn_net_id': ovim_content,
                                  'vim_net_id': content,
                                  'datacenter_tenant_id': datacenter_tenant_id
                                  }
                try:
                    mydb.new_row('instance_nets', correspondence, add_uuid=True)
                except db_base_Exception as e:
                    raise NfvoException("Error saving correspondence for VIM/SDN dataplane networks{}: {}".format(
                        correspondence, e), e.http_code)
        elif item=="tenants":
            tenant = descriptor["tenant"]
            content = myvim.new_tenant(tenant["name"], tenant.get("description"))
        else:
            raise NfvoException(item + "?", httperrors.Method_Not_Allowed)
    except vimconn.vimconnException as e:
        raise NfvoException("Not possible to create {} at VIM: {}".format(item, str(e)), e.http_code)

    return vim_action_get(mydb, tenant_id, datacenter, item, content)
+
def sdn_controller_create(mydb, tenant_id, sdn_controller):
    """Register a new SDN controller in ovim and return its uuid."""
    controller_uuid = ovim.new_of_controller(sdn_controller)
    logger.debug('New SDN controller created with uuid {}'.format(controller_uuid))
    return controller_uuid
+
def sdn_controller_update(mydb, tenant_id, controller_id, sdn_controller):
    """Update an existing SDN controller and return an informative message."""
    updated = ovim.edit_of_controller(controller_id, sdn_controller)
    message = 'SDN controller {} updated'.format(updated)
    logger.debug(message)
    return message
+
def sdn_controller_list(mydb, tenant_id, controller_id=None):
    """Return one SDN controller (by uuid) or the list of all of them."""
    if controller_id is None:
        data = ovim.get_of_controllers()
    else:
        data = ovim.show_of_controller(controller_id)

    logger.debug('SDN controller list:\n {}'.format(data))
    return data
+
def sdn_controller_delete(mydb, tenant_id, controller_id):
    """Delete an SDN controller, refusing if any datacenter still references it."""
    # a controller referenced from some datacenter 'config' cannot be removed
    datacenters = mydb.get_rows(FROM='datacenters', SELECT=('uuid', 'config'))
    for datacenter in datacenters:
        if not datacenter['config']:
            continue
        config = yaml.load(datacenter['config'], Loader=yaml.Loader)
        if 'sdn-controller' in config and config['sdn-controller'] == controller_id:
            raise NfvoException("SDN controller {} is in use by datacenter {}".format(
                controller_id, datacenter['uuid']), httperrors.Conflict)

    data = ovim.delete_of_controller(controller_id)
    message = 'SDN controller {} deleted'.format(data)
    logger.debug(message)
    return message
+
def datacenter_sdn_port_mapping_set(mydb, tenant_id, datacenter_id, sdn_port_mapping):
    """Set the compute-node to switch-port mapping of a datacenter.

    :param sdn_port_mapping: list of {"compute_node": ..., "ports": [{"pci",
        "switch_port", "switch_mac"}]}; each pci may use bracket expansion
    :return: the mapping stored by ovim
    :raises NfvoException: if the datacenter does not exist, has no SDN
        controller, or an entry lacks both switch_port and switch_mac
    """
    controller = mydb.get_rows(FROM="datacenters", SELECT=("config",), WHERE={"uuid": datacenter_id})
    if len(controller) < 1:
        raise NfvoException("Datacenter {} not present in the database".format(datacenter_id), httperrors.Not_Found)

    try:
        sdn_controller_id = yaml.load(controller[0]["config"], Loader=yaml.Loader)["sdn-controller"]
    except (yaml.YAMLError, KeyError, TypeError):
        # fix: was a bare 'except:', which also swallowed SystemExit/KeyboardInterrupt;
        # only malformed/missing config should be reported as Bad_Request
        raise NfvoException("The datacenter {} has not an SDN controller associated".format(datacenter_id),
                            httperrors.Bad_Request)

    sdn_controller = ovim.show_of_controller(sdn_controller_id)
    switch_dpid = sdn_controller["dpid"]

    maps = list()
    for compute_node in sdn_port_mapping:
        element = dict()
        element["compute_node"] = compute_node["compute_node"]
        for port in compute_node["ports"]:
            pci = port.get("pci")
            element["switch_port"] = port.get("switch_port")
            element["switch_mac"] = port.get("switch_mac")
            if not element["switch_port"] and not element["switch_mac"]:
                raise NfvoException("The mapping must contain 'switch_port' or 'switch_mac'", httperrors.Bad_Request)
            # a pci entry may use bracket ranges, e.g. "0000:5d:00.[0-3]"
            for pci_expanded in utils.expand_brackets(pci):
                element["pci"] = pci_expanded
                maps.append(dict(element))

    return ovim.set_of_port_mapping(maps, ofc_id=sdn_controller_id, switch_dpid=switch_dpid, region=datacenter_id)
+
def datacenter_sdn_port_mapping_list(mydb, tenant_id, datacenter_id):
    """Return the SDN port mapping of a datacenter, grouped by compute node."""
    maps = ovim.get_of_port_mappings(db_filter={"region": datacenter_id})

    result = {
        "sdn-controller": None,
        "datacenter-id": datacenter_id,
        "dpid": None,
        "ports_mapping": list()
    }

    # fill controller and dpid from the datacenter configuration
    datacenter = mydb.get_table_by_uuid_name('datacenters', datacenter_id)
    if datacenter['config']:
        config = yaml.load(datacenter['config'], Loader=yaml.Loader)
        if 'sdn-controller' in config:
            controller_id = config['sdn-controller']
            sdn_controller = sdn_controller_list(mydb, tenant_id, controller_id)
            result["sdn-controller"] = controller_id
            result["dpid"] = sdn_controller["dpid"]

    if result["sdn-controller"] is None:
        raise NfvoException("SDN controller is not defined for datacenter {}".format(datacenter_id),
                            httperrors.Bad_Request)
    if result["dpid"] is None:
        raise NfvoException("It was not possible to determine DPID for SDN controller {}".format(
            result["sdn-controller"]), httperrors.Internal_Server_Error)

    if not maps:
        return result

    # group the flat mapping entries by compute node
    by_compute = dict()
    for link in maps:
        if result["sdn-controller"] != link["ofc_id"]:
            raise NfvoException("The sdn-controller specified for different port mappings differ",
                                httperrors.Internal_Server_Error)
        if result["dpid"] != link["switch_dpid"]:
            raise NfvoException("The dpid specified for different port mappings differ",
                                httperrors.Internal_Server_Error)
        port = {"pci": link["pci"]}
        if link["switch_port"]:
            port["switch_port"] = link["switch_port"]
        if link["switch_mac"]:
            port["switch_mac"] = link["switch_mac"]

        node = link["compute_node"]
        if node not in by_compute:
            by_compute[node] = {"compute_node": node, "ports": list()}
        by_compute[node]["ports"].append(port)

    # keep deterministic ordering by compute node
    for node in sorted(by_compute):
        result["ports_mapping"].append(by_compute[node])

    return result
+
def datacenter_sdn_port_mapping_delete(mydb, tenant_id, datacenter_id):
    """Remove every SDN port mapping entry of the given datacenter."""
    db_filter = {"region": datacenter_id}
    return ovim.clear_of_port_mapping(db_filter=db_filter)
+
def create_RO_keypair(tenant_id):
    """Create an RSA key pair for a RO tenant.

    Params:
        tenant_id: ID of the tenant (used as the private-key passphrase)
    Return:
        public_key: public key in OpenSSH format
        private_key: PKCS#8 private key, encrypted with tenant_id
    """
    key_length_bits = 2048
    keypair = RSA.generate(key_length_bits)
    try:
        public_key = keypair.publickey().exportKey('OpenSSH')
        if isinstance(public_key, ValueError):
            raise NfvoException("Unable to create public key: {}".format(public_key), httperrors.Internal_Server_Error)
        # the private key is encrypted using the tenant id as passphrase
        private_key = keypair.exportKey(passphrase=tenant_id, pkcs=8)
    except (ValueError, NameError) as e:
        raise NfvoException("Unable to create private key: {}".format(e), httperrors.Internal_Server_Error)
    return public_key, private_key
+
def decrypt_key(key, tenant_id):
    """Decrypt an encrypted RSA private key.

    Params:
        key: private key to be decrypted
        tenant_id: ID of the tenant (the passphrase the key was encrypted with)
    Return:
        unencrypted_key: unencrypted private key in PEM format
    """
    try:
        rsa_key = RSA.importKey(key, tenant_id)
        unencrypted_key = rsa_key.exportKey('PEM')
        if isinstance(unencrypted_key, ValueError):
            raise NfvoException("Unable to decrypt the private key: {}".format(unencrypted_key),
                                httperrors.Internal_Server_Error)
    except ValueError as e:
        raise NfvoException("Unable to decrypt the private key: {}".format(e), httperrors.Internal_Server_Error)
    return unencrypted_key
diff --git a/RO/osm_ro/nfvo_db.py b/RO/osm_ro/nfvo_db.py
new file mode 100644 (file)
index 0000000..afd9d15
--- /dev/null
@@ -0,0 +1,1175 @@
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+"""
+NFVO DB engine. It implements all the methods to interact with the Openmano Database
+"""
+__author__="Alfonso Tierno, Gerardo Garcia, Pablo Montes"
+__date__ ="$28-aug-2014 10:05:01$"
+
+from osm_ro import db_base
+import MySQLdb as mdb
+import json
+import yaml
+import time
+#import sys, os
+
+from osm_ro.db_base import retry, with_transaction
+from osm_ro.http_tools import errors as httperrors
+from osm_ro.utils import Attempt
+
+
+_ATTEMPT = Attempt()
+
+
+tables_with_createdat_field=["datacenters","instance_nets","instance_scenarios","instance_vms","instance_vnfs",
+                           "interfaces","nets","nfvo_tenants","scenarios","sce_interfaces","sce_nets",
+                           "sce_vnfs","tenants_datacenters","datacenter_tenants","vms","vnfs", "datacenter_nets",
+                           "instance_actions", "sce_vnffgs", "sce_rsps", "sce_rsp_hops",
+                           "sce_classifiers", "sce_classifier_matches", "instance_sfis", "instance_sfs",
+                           "instance_classifications", "instance_sfps", "wims", "wim_accounts", "wim_nfvo_tenants",
+                           "wim_port_mappings", "vim_wim_actions",
+                           "instance_wim_nets"]
+
+
+class nfvo_db(db_base.db_base):
+    def __init__(self, host=None, user=None, passwd=None, database=None,
+                 log_name='openmano.db', log_level=None, lock=None):
+        db_base.db_base.__init__(self, host, user, passwd, database,
+                                 log_name, log_level, lock)
+        db_base.db_base.tables_with_created_field=tables_with_createdat_field
+        return
+
    @retry
    @with_transaction
    def new_vnf_as_a_whole(self,nfvo_tenant,vnf_name,vnf_descriptor,VNFCDict):
        """Insert a complete VNF (vnfs, vms, nets and interfaces rows) in one transaction.

        Params:
            nfvo_tenant: unused here; kept for API symmetry with the caller
            vnf_name: name stored in the 'vnfs' row
            vnf_descriptor: dict with a 'vnf' key holding the descriptor
                (VNFC list, internal-connections, external-connections, ...)
            VNFCDict: dict of VM rows to insert in 'vms', keyed arbitrarily;
                each value must carry at least a 'name'
        Return:
            uuid of the created 'vnfs' row
        Note: created_time is bumped by 10us per row so insertion order is
        preserved when sorting by created_at.
        """
        self.logger.debug("Adding new vnf to the NFVO database")
        created_time = time.time()
        myVNFDict = {}
        myVNFDict["name"] = vnf_name
        myVNFDict["descriptor"] = vnf_descriptor['vnf'].get('descriptor')
        myVNFDict["public"] = vnf_descriptor['vnf'].get('public', "false")
        myVNFDict["description"] = vnf_descriptor['vnf']['description']
        myVNFDict["class"] = vnf_descriptor['vnf'].get('class',"MISC")
        myVNFDict["tenant_id"] = vnf_descriptor['vnf'].get("tenant_id")

        vnf_id = self._new_row_internal('vnfs', myVNFDict, add_uuid=True, root_uuid=None, created_time=created_time)
        #print "Adding new vms to the NFVO database"
        #For each vm, we must create the appropriate vm in the NFVO database.
        vmDict = {}
        for _,vm in VNFCDict.items():
            #This code could make the name of the vms grow and grow.
            #If we agree to follow this convention, we should check with a regex that the vnfc name is not including yet the vnf name
            #vm['name'] = "%s-%s" % (vnf_name,vm['name'])
            #print "VM name: %s. Description: %s" % (vm['name'], vm['description'])
            vm["vnf_id"] = vnf_id
            created_time += 0.00001
            vm_id = self._new_row_internal('vms', vm, add_uuid=True, root_uuid=vnf_id, created_time=created_time)
            #print "Internal vm id in NFVO DB: %s" % vm_id
            # map VNFC name -> vms row uuid, used below to wire interfaces
            vmDict[vm['name']] = vm_id

        #Collect the bridge interfaces of each VM/VNFC under the 'bridge-ifaces' field
        bridgeInterfacesDict = {}
        for vm in vnf_descriptor['vnf']['VNFC']:
            if 'bridge-ifaces' in  vm:
                bridgeInterfacesDict[vm['name']] = {}
                for bridgeiface in vm['bridge-ifaces']:
                    created_time += 0.00001
                    # normalize descriptor keys (dash) to DB column names (underscore)
                    if 'port-security' in bridgeiface:
                        bridgeiface['port_security'] = bridgeiface.pop('port-security')
                    if 'floating-ip' in bridgeiface:
                        bridgeiface['floating_ip'] = bridgeiface.pop('floating-ip')
                    db_base._convert_bandwidth(bridgeiface, logger=self.logger)
                    bridgeInterfacesDict[vm['name']][bridgeiface['name']] = {}
                    bridgeInterfacesDict[vm['name']][bridgeiface['name']]['vpci'] = bridgeiface.get('vpci',None)
                    bridgeInterfacesDict[vm['name']][bridgeiface['name']]['mac'] = bridgeiface.get('mac_address',None)
                    bridgeInterfacesDict[vm['name']][bridgeiface['name']]['bw'] = bridgeiface.get('bandwidth', None)
                    bridgeInterfacesDict[vm['name']][bridgeiface['name']]['model'] = bridgeiface.get('model', None)
                    bridgeInterfacesDict[vm['name']][bridgeiface['name']]['port_security'] = \
                        int(bridgeiface.get('port_security', True))
                    bridgeInterfacesDict[vm['name']][bridgeiface['name']]['floating_ip'] = \
                        int(bridgeiface.get('floating_ip', False))
                    bridgeInterfacesDict[vm['name']][bridgeiface['name']]['created_time'] = created_time

        # Collect the data interfaces of each VM/VNFC under the 'numas' field
        dataifacesDict = {}
        for vm in vnf_descriptor['vnf']['VNFC']:
            dataifacesDict[vm['name']] = {}
            for numa in vm.get('numas', []):
                for dataiface in numa.get('interfaces', []):
                    created_time += 0.00001
                    db_base._convert_bandwidth(dataiface, logger=self.logger)
                    dataifacesDict[vm['name']][dataiface['name']] = {}
                    dataifacesDict[vm['name']][dataiface['name']]['vpci'] = dataiface.get('vpci')
                    dataifacesDict[vm['name']][dataiface['name']]['bw'] = dataiface['bandwidth']
                    # 'dedicated' maps to the SR-IOV model: PF / VF / VFnotShared
                    dataifacesDict[vm['name']][dataiface['name']]['model'] = "PF" if dataiface[
                                                                                            'dedicated'] == "yes" else (
                    "VF" if dataiface['dedicated'] == "no" else "VFnotShared")
                    dataifacesDict[vm['name']][dataiface['name']]['created_time'] = created_time

        #For each internal connection, we add it to the interfaceDict and we  create the appropriate net in the NFVO database.
        #print "Adding new nets (VNF internal nets) to the NFVO database (if any)"
        internalconnList = []
        if 'internal-connections' in vnf_descriptor['vnf']:
            for net in vnf_descriptor['vnf']['internal-connections']:
                #print "Net name: %s. Description: %s" % (net['name'], net['description'])

                myNetDict = {}
                myNetDict["name"] = net['name']
                myNetDict["description"] = net['description']
                myNetDict["type"] = net['type']
                myNetDict["vnf_id"] = vnf_id

                created_time += 0.00001
                net_id = self._new_row_internal('nets', myNetDict, add_uuid=True, root_uuid=vnf_id, created_time=created_time)

                for element in net['elements']:
                    ifaceItem = {}
                    #ifaceItem["internal_name"] = "%s-%s-%s" % (net['name'],element['VNFC'], element['local_iface_name'])
                    ifaceItem["internal_name"] = element['local_iface_name']
                    #ifaceItem["vm_id"] = vmDict["%s-%s" % (vnf_name,element['VNFC'])]
                    ifaceItem["vm_id"] = vmDict[element['VNFC']]
                    ifaceItem["net_id"] = net_id
                    ifaceItem["type"] = net['type']
                    # the interface row is enriched from the matching data or
                    # bridge iface collected above, depending on the net type
                    if ifaceItem ["type"] == "data":
                        dataiface = dataifacesDict[ element['VNFC'] ][ element['local_iface_name'] ]
                        ifaceItem["vpci"] =  dataiface['vpci']
                        ifaceItem["bw"] =    dataiface['bw']
                        ifaceItem["model"] = dataiface['model']
                        created_time_iface = dataiface['created_time']
                    else:
                        bridgeiface =  bridgeInterfacesDict[ element['VNFC'] ][ element['local_iface_name'] ]
                        ifaceItem["vpci"]          = bridgeiface['vpci']
                        ifaceItem["mac"]           = bridgeiface['mac']
                        ifaceItem["bw"]            = bridgeiface['bw']
                        ifaceItem["model"]         = bridgeiface['model']
                        ifaceItem["port_security"] = bridgeiface['port_security']
                        ifaceItem["floating_ip"]   = bridgeiface['floating_ip']
                        created_time_iface = bridgeiface['created_time']
                    internalconnList.append(ifaceItem)
                #print "Internal net id in NFVO DB: %s" % net_id

        #print "Adding internal interfaces to the NFVO database (if any)"
        for iface in internalconnList:
            #print "Iface name: %s" % iface['internal_name']
            iface_id = self._new_row_internal('interfaces', iface, add_uuid=True, root_uuid=vnf_id, created_time = created_time_iface)
            #print "Iface id in NFVO DB: %s" % iface_id

        #print "Adding external interfaces to the NFVO database"
        for iface in vnf_descriptor['vnf']['external-connections']:
            myIfaceDict = {}
            #myIfaceDict["internal_name"] = "%s-%s-%s" % (vnf_name,iface['VNFC'], iface['local_iface_name'])
            myIfaceDict["internal_name"] = iface['local_iface_name']
            #myIfaceDict["vm_id"] = vmDict["%s-%s" % (vnf_name,iface['VNFC'])]
            myIfaceDict["vm_id"] = vmDict[iface['VNFC']]
            myIfaceDict["external_name"] = iface['name']
            myIfaceDict["type"] = iface['type']
            if iface["type"] == "data":
                dataiface = dataifacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]
                myIfaceDict["vpci"]         = dataiface['vpci']
                myIfaceDict["bw"]           = dataiface['bw']
                myIfaceDict["model"]        = dataiface['model']
                created_time_iface = dataiface['created_time']
            else:
                bridgeiface = bridgeInterfacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]
                myIfaceDict["vpci"]         = bridgeiface['vpci']
                myIfaceDict["bw"]           = bridgeiface['bw']
                myIfaceDict["model"]        = bridgeiface['model']
                myIfaceDict["mac"]          = bridgeiface['mac']
                myIfaceDict["port_security"]= bridgeiface['port_security']
                myIfaceDict["floating_ip"]  = bridgeiface['floating_ip']
                created_time_iface = bridgeiface['created_time']
            #print "Iface name: %s" % iface['name']
            iface_id = self._new_row_internal('interfaces', myIfaceDict, add_uuid=True, root_uuid=vnf_id, created_time = created_time_iface)
            #print "Iface id in NFVO DB: %s" % iface_id

        return vnf_id
+
    @retry
    @with_transaction
    def new_vnf_as_a_whole2(self,nfvo_tenant,vnf_name,vnf_descriptor,VNFCDict):
        """Insert a complete VNF in one transaction (v2 descriptor variant).

        Differs from new_vnf_as_a_whole in that it additionally handles the
        net 'implementation' field (overlay/underlay -> bridge/ptp/data),
        per-net 'ip-profile' rows and per-element 'ip_address', and inserts
        internal-connection interfaces inline instead of collecting a list.

        Params: see new_vnf_as_a_whole.
        Return:
            uuid of the created 'vnfs' row
        """
        self.logger.debug("Adding new vnf to the NFVO database")
        created_time = time.time()
        myVNFDict = {}
        myVNFDict["name"] = vnf_name
        myVNFDict["descriptor"] = vnf_descriptor['vnf'].get('descriptor')
        myVNFDict["public"] = vnf_descriptor['vnf'].get('public', "false")
        myVNFDict["description"] = vnf_descriptor['vnf']['description']
        myVNFDict["class"] = vnf_descriptor['vnf'].get('class',"MISC")
        myVNFDict["tenant_id"] = vnf_descriptor['vnf'].get("tenant_id")

        vnf_id = self._new_row_internal('vnfs', myVNFDict, add_uuid=True, root_uuid=None, created_time=created_time)
        #print "Adding new vms to the NFVO database"
        #For each vm, we must create the appropriate vm in the NFVO database.
        vmDict = {}
        for _,vm in VNFCDict.items():
            #This code could make the name of the vms grow and grow.
            #If we agree to follow this convention, we should check with a regex that the vnfc name is not including yet the vnf name
            #vm['name'] = "%s-%s" % (vnf_name,vm['name'])
            #print "VM name: %s. Description: %s" % (vm['name'], vm['description'])
            vm["vnf_id"] = vnf_id
            created_time += 0.00001
            vm_id = self._new_row_internal('vms', vm, add_uuid=True, root_uuid=vnf_id, created_time=created_time)
            #print "Internal vm id in NFVO DB: %s" % vm_id
            # map VNFC name -> vms row uuid, used below to wire interfaces
            vmDict[vm['name']] = vm_id

        #Collect the bridge interfaces of each VM/VNFC under the 'bridge-ifaces' field
        bridgeInterfacesDict = {}
        for vm in vnf_descriptor['vnf']['VNFC']:
            if 'bridge-ifaces' in  vm:
                bridgeInterfacesDict[vm['name']] = {}
                for bridgeiface in vm['bridge-ifaces']:
                    created_time += 0.00001
                    db_base._convert_bandwidth(bridgeiface, logger=self.logger)
                    # normalize descriptor keys (dash) to DB column names (underscore)
                    if 'port-security' in bridgeiface:
                        bridgeiface['port_security'] = bridgeiface.pop('port-security')
                    if 'floating-ip' in bridgeiface:
                        bridgeiface['floating_ip'] = bridgeiface.pop('floating-ip')
                    ifaceDict = {}
                    ifaceDict['vpci'] = bridgeiface.get('vpci',None)
                    ifaceDict['mac'] = bridgeiface.get('mac_address',None)
                    ifaceDict['bw'] = bridgeiface.get('bandwidth', None)
                    ifaceDict['model'] = bridgeiface.get('model', None)
                    ifaceDict['port_security'] = int(bridgeiface.get('port_security', True))
                    ifaceDict['floating_ip'] = int(bridgeiface.get('floating_ip', False))
                    ifaceDict['created_time'] = created_time
                    bridgeInterfacesDict[vm['name']][bridgeiface['name']] = ifaceDict

        # Collect the data interfaces of each VM/VNFC under the 'numas' field
        dataifacesDict = {}
        for vm in vnf_descriptor['vnf']['VNFC']:
            dataifacesDict[vm['name']] = {}
            for numa in vm.get('numas', []):
                for dataiface in numa.get('interfaces', []):
                    created_time += 0.00001
                    db_base._convert_bandwidth(dataiface, logger=self.logger)
                    ifaceDict = {}
                    ifaceDict['vpci'] = dataiface.get('vpci')
                    ifaceDict['bw'] = dataiface['bandwidth']
                    # 'dedicated' maps to the SR-IOV model: PF / VF / VFnotShared
                    ifaceDict['model'] = "PF" if dataiface['dedicated'] == "yes" else \
                        ("VF" if dataiface['dedicated'] == "no" else "VFnotShared")
                    ifaceDict['created_time'] = created_time
                    dataifacesDict[vm['name']][dataiface['name']] = ifaceDict

        #For each internal connection, we add it to the interfaceDict and we  create the appropriate net in the NFVO database.
        #print "Adding new nets (VNF internal nets) to the NFVO database (if any)"
        if 'internal-connections' in vnf_descriptor['vnf']:
            for net in vnf_descriptor['vnf']['internal-connections']:
                #print "Net name: %s. Description: %s" % (net['name'], net['description'])

                myNetDict = {}
                myNetDict["name"] = net['name']
                myNetDict["description"] = net['description']
                # translate descriptor implementation/type into the DB net type
                if (net["implementation"] == "overlay"):
                    net["type"] = "bridge"
                    #It should give an error if the type is e-line. For the moment, we consider it as a bridge
                elif (net["implementation"] == "underlay"):
                    if (net["type"] == "e-line"):
                        net["type"] = "ptp"
                    elif (net["type"] == "e-lan"):
                        net["type"] = "data"
                net.pop("implementation")
                myNetDict["type"] = net['type']
                myNetDict["vnf_id"] = vnf_id

                created_time += 0.00001
                net_id = self._new_row_internal('nets', myNetDict, add_uuid=True, root_uuid=vnf_id, created_time=created_time)

                if "ip-profile" in net:
                    ip_profile = net["ip-profile"]
                    myIPProfileDict = {}
                    myIPProfileDict["net_id"] = net_id
                    myIPProfileDict["ip_version"] = ip_profile.get('ip-version',"IPv4")
                    myIPProfileDict["subnet_address"] = ip_profile.get('subnet-address',None)
                    myIPProfileDict["gateway_address"] = ip_profile.get('gateway-address',None)
                    myIPProfileDict["dns_address"] = ip_profile.get('dns-address',None)
                    if ("dhcp" in ip_profile):
                        myIPProfileDict["dhcp_enabled"] = ip_profile["dhcp"].get('enabled',"true")
                        myIPProfileDict["dhcp_start_address"] = ip_profile["dhcp"].get('start-address',None)
                        myIPProfileDict["dhcp_count"] = ip_profile["dhcp"].get('count',None)

                    created_time += 0.00001
                    # NOTE(review): the returned id is unused; row is inserted
                    # without add_uuid/created_time, unlike the other inserts
                    ip_profile_id = self._new_row_internal('ip_profiles', myIPProfileDict)

                for element in net['elements']:
                    ifaceItem = {}
                    #ifaceItem["internal_name"] = "%s-%s-%s" % (net['name'],element['VNFC'], element['local_iface_name'])
                    ifaceItem["internal_name"] = element['local_iface_name']
                    #ifaceItem["vm_id"] = vmDict["%s-%s" % (vnf_name,element['VNFC'])]
                    ifaceItem["vm_id"] = vmDict[element['VNFC']]
                    ifaceItem["net_id"] = net_id
                    ifaceItem["type"] = net['type']
                    ifaceItem["ip_address"] = element.get('ip_address',None)
                    if ifaceItem ["type"] == "data":
                        ifaceDict = dataifacesDict[ element['VNFC'] ][ element['local_iface_name'] ]
                        ifaceItem["vpci"] =  ifaceDict['vpci']
                        ifaceItem["bw"] =    ifaceDict['bw']
                        ifaceItem["model"] = ifaceDict['model']
                    else:
                        ifaceDict = bridgeInterfacesDict[ element['VNFC'] ][ element['local_iface_name'] ]
                        ifaceItem["vpci"] =  ifaceDict['vpci']
                        ifaceItem["mac"] =  ifaceDict['mac']
                        ifaceItem["bw"] =    ifaceDict['bw']
                        ifaceItem["model"] = ifaceDict['model']
                        ifaceItem["port_security"] = ifaceDict['port_security']
                        ifaceItem["floating_ip"] = ifaceDict['floating_ip']
                    created_time_iface = ifaceDict["created_time"]
                    #print "Iface name: %s" % iface['internal_name']
                    iface_id = self._new_row_internal('interfaces', ifaceItem, add_uuid=True, root_uuid=vnf_id, created_time=created_time_iface)
                    #print "Iface id in NFVO DB: %s" % iface_id

        #print "Adding external interfaces to the NFVO database"
        for iface in vnf_descriptor['vnf']['external-connections']:
            myIfaceDict = {}
            #myIfaceDict["internal_name"] = "%s-%s-%s" % (vnf_name,iface['VNFC'], iface['local_iface_name'])
            myIfaceDict["internal_name"] = iface['local_iface_name']
            #myIfaceDict["vm_id"] = vmDict["%s-%s" % (vnf_name,iface['VNFC'])]
            myIfaceDict["vm_id"] = vmDict[iface['VNFC']]
            myIfaceDict["external_name"] = iface['name']
            myIfaceDict["type"] = iface['type']
            if iface["type"] == "data":
                myIfaceDict["vpci"]  = dataifacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['vpci']
                myIfaceDict["bw"]    = dataifacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['bw']
                myIfaceDict["model"] = dataifacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['model']
                created_time_iface = dataifacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['created_time']
            else:
                myIfaceDict["vpci"]  = bridgeInterfacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['vpci']
                myIfaceDict["bw"]    = bridgeInterfacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['bw']
                myIfaceDict["model"] = bridgeInterfacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['model']
                myIfaceDict["mac"] = bridgeInterfacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['mac']
                myIfaceDict["port_security"] = \
                    bridgeInterfacesDict[iface['VNFC']][iface['local_iface_name']]['port_security']
                myIfaceDict["floating_ip"] = \
                    bridgeInterfacesDict[iface['VNFC']][iface['local_iface_name']]['floating_ip']
                created_time_iface = bridgeInterfacesDict[iface['VNFC']][iface['local_iface_name']]['created_time']
            #print "Iface name: %s" % iface['name']
            iface_id = self._new_row_internal('interfaces', myIfaceDict, add_uuid=True, root_uuid=vnf_id, created_time=created_time_iface)
            #print "Iface id in NFVO DB: %s" % iface_id

        return vnf_id
+
+#             except KeyError as e2:
+#                 exc_type, exc_obj, exc_tb = sys.exc_info()
+#                 fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
+#                 self.logger.debug("Exception type: %s; Filename: %s; Line number: %s", exc_type, fname, exc_tb.tb_lineno)
+#                 raise KeyError
+
    @retry
    @with_transaction
    def new_scenario(self, scenario_dict):
        """Insert a scenario with its sce_nets, ip_profiles, sce_vnfs and sce_interfaces rows.

        Params:
            scenario_dict: dict with 'name', 'description', optional
                'tenant_id'/'public', plus 'nets' and 'vnfs' sub-dicts.
                Mutated in place: each net gets a 'uuid' key and each vnf a
                'scn_vnf_uuid' key with the created row ids.
        Return:
            uuid of the created 'scenarios' row
        Note: created_time is bumped by 10us per row so insertion order is
        preserved when sorting by created_at.
        """
        created_time = time.time()
        tenant_id = scenario_dict.get('tenant_id')
        #scenario
        INSERT_={'tenant_id': tenant_id,
                    'name': scenario_dict['name'],
                    'description': scenario_dict['description'],
                    'public': scenario_dict.get('public', "false")}

        scenario_uuid =  self._new_row_internal('scenarios', INSERT_, add_uuid=True, root_uuid=None, created_time=created_time)
        #sce_nets
        for net in scenario_dict['nets'].values():
            net_dict={'scenario_id': scenario_uuid}
            net_dict["name"] = net["name"]
            net_dict["type"] = net["type"]
            net_dict["description"] = net.get("description")
            net_dict["external"] = net.get("external", False)
            if "graph" in net:
                #net["graph"]=yaml.safe_dump(net["graph"],default_flow_style=True,width=256)
                #TODO, must be json because of the GUI, change to yaml
                net_dict["graph"]=json.dumps(net["graph"])
            created_time += 0.00001
            net_uuid =  self._new_row_internal('sce_nets', net_dict, add_uuid=True, root_uuid=scenario_uuid, created_time=created_time)
            # stored back into the caller's dict; sce_interfaces below
            # resolve their net through this uuid
            net['uuid']=net_uuid

            if net.get("ip-profile"):
                ip_profile = net["ip-profile"]
                myIPProfileDict = {
                    "sce_net_id": net_uuid,
                    "ip_version": ip_profile.get('ip-version', "IPv4"),
                    "subnet_address": ip_profile.get('subnet-address'),
                    "gateway_address": ip_profile.get('gateway-address'),
                    "dns_address": ip_profile.get('dns-address')}
                if "dhcp" in ip_profile:
                    myIPProfileDict["dhcp_enabled"] = ip_profile["dhcp"].get('enabled', "true")
                    myIPProfileDict["dhcp_start_address"] = ip_profile["dhcp"].get('start-address')
                    myIPProfileDict["dhcp_count"] = ip_profile["dhcp"].get('count')
                self._new_row_internal('ip_profiles', myIPProfileDict)

        # sce_vnfs
        for k, vnf in scenario_dict['vnfs'].items():
            INSERT_ = {'scenario_id': scenario_uuid,
                        'name': k,
                        'vnf_id': vnf['uuid'],
                        # 'description': scenario_dict['name']
                        'description': vnf['description']}
            if "graph" in vnf:
                #I NSERT_["graph"]=yaml.safe_dump(vnf["graph"],default_flow_style=True,width=256)
                # TODO, must be json because of the GUI, change to yaml
                INSERT_["graph"] = json.dumps(vnf["graph"])
            created_time += 0.00001
            scn_vnf_uuid = self._new_row_internal('sce_vnfs', INSERT_, add_uuid=True,
                                                    root_uuid=scenario_uuid, created_time=created_time)
            vnf['scn_vnf_uuid']=scn_vnf_uuid
            # sce_interfaces
            for iface in vnf['ifaces'].values():
                # print 'iface', iface
                # ifaces without a net_key are not connected to a scenario net
                if 'net_key' not in iface:
                    continue
                iface['net_id'] = scenario_dict['nets'][ iface['net_key'] ]['uuid']
                INSERT_={'sce_vnf_id': scn_vnf_uuid,
                            'sce_net_id': iface['net_id'],
                            'interface_id':  iface['uuid'],
                            'ip_address': iface.get('ip_address')}
                created_time += 0.00001
                iface_uuid = self._new_row_internal('sce_interfaces', INSERT_, add_uuid=True,
                                                        root_uuid=scenario_uuid, created_time=created_time)

        return scenario_uuid
+
    @retry
    @with_transaction
    def edit_scenario(self, scenario_dict):
        """Update a scenario row and the graph of its sce_nets / sce_vnfs nodes.

        Params:
            scenario_dict: must contain 'uuid'; may contain 'tenant_id',
                'name', 'description' and a 'topology' dict whose 'nodes'
                entries are applied to sce_nets/sce_vnfs rows by uuid.
        Return:
            number of rows changed
        Raises:
            db_base.db_base_Exception (Bad_Request) if zero or more than one
                scenario matches the filter
        """
        modified_time = time.time()
        item_changed=0
        #check that scenario exist
        tenant_id = scenario_dict.get('tenant_id')
        scenario_uuid = scenario_dict['uuid']

        # NOTE(review): SQL is built by string formatting, not parameterized;
        # callers must guarantee uuid/tenant_id are sanitized — confirm.
        where_text = "uuid='{}'".format(scenario_uuid)
        # NOTE(review): this adds the tenant filter only when tenant_id is
        # falsy (formatting None into the SQL); 'if tenant_id and tenant_id
        # != "any"' looks intended, but the same pattern is used in
        # get_scenario, so it is left untouched — confirm upstream.
        if not tenant_id and tenant_id != "any":
            where_text += " AND (tenant_id='{}' OR public='True')".format(tenant_id)
        cmd = "SELECT * FROM scenarios WHERE "+ where_text
        self.logger.debug(cmd)
        self.cur.execute(cmd)
        self.cur.fetchall()
        if self.cur.rowcount==0:
            raise db_base.db_base_Exception("No scenario found with this criteria " + where_text, httperrors.Bad_Request)
        elif self.cur.rowcount>1:
            raise db_base.db_base_Exception("More than one scenario found with this criteria " + where_text, httperrors.Bad_Request)

        #scenario
        nodes = {}
        topology = scenario_dict.pop("topology", None)
        if topology != None and "nodes" in topology:
            nodes = topology.get("nodes",{})
        UPDATE_ = {}
        if "name" in scenario_dict:        UPDATE_["name"] = scenario_dict["name"]
        if "description" in scenario_dict: UPDATE_["description"] = scenario_dict["description"]
        if len(UPDATE_)>0:
            WHERE_={'tenant_id': tenant_id, 'uuid': scenario_uuid}
            item_changed += self._update_rows('scenarios', UPDATE_, WHERE_, modified_time=modified_time)
        #sce_nets
        for node_id, node in nodes.items():
            if "graph" in node:
                #node["graph"] = yaml.safe_dump(node["graph"],default_flow_style=True,width=256)
                #TODO, must be json because of the GUI, change to yaml
                node["graph"] = json.dumps(node["graph"])
            WHERE_={'scenario_id': scenario_uuid, 'uuid': node_id}
            #Try to change at sce_nets(version 0 API backward compatibility and sce_vnfs)
            item_changed += self._update_rows('sce_nets', node, WHERE_)
            item_changed += self._update_rows('sce_vnfs', node, WHERE_, modified_time=modified_time)
        return item_changed
+
+#     def get_instance_scenario(self, instance_scenario_id, tenant_id=None):
+#         '''Obtain the scenario instance information, filtering by one or several of the tenant, uuid or name
+#         instance_scenario_id is the uuid or the name if it is not a valid uuid format
+#         Only one scenario isntance must mutch the filtering or an error is returned
+#         '''
+#         print "1******************************************************************"
+#         try:
+#             with self.transaction(mdb.cursors.DictCursor):
+#                 #scenario table
+#                 where_list=[]
+#                 if tenant_id is not None: where_list.append( "tenant_id='" + tenant_id +"'" )
+#                 if db_base._check_valid_uuid(instance_scenario_id):
+#                     where_list.append( "uuid='" + instance_scenario_id +"'" )
+#                 else:
+#                     where_list.append( "name='" + instance_scenario_id +"'" )
+#                 where_text = " AND ".join(where_list)
+#                 self.cur.execute("SELECT * FROM instance_scenarios WHERE "+ where_text)
+#                 rows = self.cur.fetchall()
+#                 if self.cur.rowcount==0:
+#                     return -httperrors.Bad_Request, "No scenario instance found with this criteria " + where_text
+#                 elif self.cur.rowcount>1:
+#                     return -httperrors.Bad_Request, "More than one scenario instance found with this criteria " + where_text
+#                 instance_scenario_dict = rows[0]
+#
+#                 #instance_vnfs
+#                 self.cur.execute("SELECT uuid,vnf_id FROM instance_vnfs WHERE instance_scenario_id='"+ instance_scenario_dict['uuid'] + "'")
+#                 instance_scenario_dict['instance_vnfs'] = self.cur.fetchall()
+#                 for vnf in instance_scenario_dict['instance_vnfs']:
+#                     #instance_vms
+#                     self.cur.execute("SELECT uuid, vim_vm_id "+
+#                                 "FROM instance_vms  "+
+#                                 "WHERE instance_vnf_id='" + vnf['uuid'] +"'"
+#                                 )
+#                     vnf['instance_vms'] = self.cur.fetchall()
+#                 #instance_nets
+#                 self.cur.execute("SELECT uuid, vim_net_id FROM instance_nets WHERE instance_scenario_id='"+ instance_scenario_dict['uuid'] + "'")
+#                 instance_scenario_dict['instance_nets'] = self.cur.fetchall()
+#
+#                 #instance_interfaces
+#                 self.cur.execute("SELECT uuid, vim_interface_id, instance_vm_id, instance_net_id FROM instance_interfaces WHERE instance_scenario_id='"+ instance_scenario_dict['uuid'] + "'")
+#                 instance_scenario_dict['instance_interfaces'] = self.cur.fetchall()
+#
+#                 db_base._convert_datetime2str(instance_scenario_dict)
+#                 db_base._convert_str2boolean(instance_scenario_dict, ('public','shared','external') )
+#                 print "2******************************************************************"
+#                 return 1, instance_scenario_dict
+#         except (mdb.Error, AttributeError) as e:
+#             print "nfvo_db.get_instance_scenario DB Exception %d: %s" % (e.args[0], e.args[1])
+#             return self._format_error(e)
+
+    @retry
+    @with_transaction(cursor='dict')
+    def get_scenario(self, scenario_id, tenant_id=None, datacenter_vim_id=None, datacenter_id=None):
+        '''Obtain the scenario information, filtering by one or several of the tenant, uuid or name
+        scenario_id is the uuid or the name if it is not a valid uuid format
+        if datacenter_vim_id,d datacenter_id is provided, it supply aditional vim_id fields with the matching vim uuid
+        Only one scenario must mutch the filtering or an error is returned
+        '''
+        where_text = "uuid='{}'".format(scenario_id)
+        if not tenant_id and tenant_id != "any":
+            where_text += " AND (tenant_id='{}' OR public='True')".format(tenant_id)
+        cmd = "SELECT * FROM scenarios WHERE " + where_text
+        self.logger.debug(cmd)
+        self.cur.execute(cmd)
+        rows = self.cur.fetchall()
+        if self.cur.rowcount==0:
+            raise db_base.db_base_Exception("No scenario found with this criteria " + where_text, httperrors.Bad_Request)
+        elif self.cur.rowcount>1:
+            raise db_base.db_base_Exception("More than one scenario found with this criteria " + where_text, httperrors.Bad_Request)
+        scenario_dict = rows[0]
+        if scenario_dict["cloud_config"]:
+            scenario_dict["cloud-config"] = yaml.load(scenario_dict["cloud_config"], Loader=yaml.Loader)
+        del scenario_dict["cloud_config"]
+        # sce_vnfs
+        cmd = "SELECT uuid,name,member_vnf_index,vnf_id,description FROM sce_vnfs WHERE scenario_id='{}' "\
+                "ORDER BY created_at".format(scenario_dict['uuid'])
+        self.logger.debug(cmd)
+        self.cur.execute(cmd)
+        scenario_dict['vnfs'] = self.cur.fetchall()
+
+        for vnf in scenario_dict['vnfs']:
+            cmd = "SELECT mgmt_access FROM vnfs WHERE uuid='{}'".format(scenario_dict['vnfs'][0]['vnf_id'])
+            self.logger.debug(cmd)
+            self.cur.execute(cmd)
+            mgmt_access_dict = self.cur.fetchall()
+            if mgmt_access_dict[0].get('mgmt_access'):
+                vnf['mgmt_access'] = yaml.load(mgmt_access_dict[0]['mgmt_access'], Loader=yaml.Loader)
+            else:
+                vnf['mgmt_access'] = None
+            # sce_interfaces
+            cmd = "SELECT scei.uuid,scei.sce_net_id,scei.interface_id,i.external_name,scei.ip_address"\
+                    " FROM sce_interfaces as scei join interfaces as i on scei.interface_id=i.uuid"\
+                    " WHERE scei.sce_vnf_id='{}' ORDER BY scei.created_at".format(vnf['uuid'])
+            self.logger.debug(cmd)
+            self.cur.execute(cmd)
+            vnf['interfaces'] = self.cur.fetchall()
+            # vms
+            cmd = "SELECT vms.uuid as uuid, flavor_id, image_id, image_list, vms.name as name," \
+                    " vms.description as description, vms.boot_data as boot_data, count," \
+                    " vms.availability_zone as availability_zone, vms.osm_id as osm_id, vms.pdu_type" \
+                    " FROM vnfs join vms on vnfs.uuid=vms.vnf_id" \
+                    " WHERE vnfs.uuid='" + vnf['vnf_id'] + "'"  \
+                    " ORDER BY vms.created_at"
+            self.logger.debug(cmd)
+            self.cur.execute(cmd)
+            vnf['vms'] = self.cur.fetchall()
+            for vm in vnf['vms']:
+                if vm["boot_data"]:
+                    vm["boot_data"] = yaml.safe_load(vm["boot_data"])
+                else:
+                    del vm["boot_data"]
+                if vm["image_list"]:
+                    vm["image_list"] = yaml.safe_load(vm["image_list"])
+                else:
+                    del vm["image_list"]
+                if datacenter_vim_id!=None:
+                    if vm['image_id']:
+                        cmd = "SELECT vim_id FROM datacenters_images WHERE image_id='{}' AND " \
+                                "datacenter_vim_id='{}'".format(vm['image_id'], datacenter_vim_id)
+                        self.logger.debug(cmd)
+                        self.cur.execute(cmd)
+                        if self.cur.rowcount==1:
+                            vim_image_dict = self.cur.fetchone()
+                            vm['vim_image_id']=vim_image_dict['vim_id']
+                    if vm['flavor_id']:
+                        cmd = "SELECT vim_id FROM datacenters_flavors WHERE flavor_id='{}' AND " \
+                                "datacenter_vim_id='{}'".format(vm['flavor_id'], datacenter_vim_id)
+                        self.logger.debug(cmd)
+                        self.cur.execute(cmd)
+                        if self.cur.rowcount==1:
+                            vim_flavor_dict = self.cur.fetchone()
+                            vm['vim_flavor_id']=vim_flavor_dict['vim_id']
+
+                #interfaces
+                cmd = "SELECT uuid,internal_name,external_name,net_id,type,vpci,mac,bw,model,ip_address," \
+                        "floating_ip, port_security" \
+                        " FROM interfaces" \
+                        " WHERE vm_id='{}'" \
+                        " ORDER BY created_at".format(vm['uuid'])
+                self.logger.debug(cmd)
+                self.cur.execute(cmd)
+                vm['interfaces'] = self.cur.fetchall()
+                for iface in vm['interfaces']:
+                    iface['port-security'] = iface.pop("port_security")
+                    iface['floating-ip'] = iface.pop("floating_ip")
+                    for sce_interface in vnf["interfaces"]:
+                        if sce_interface["interface_id"] == iface["uuid"]:
+                            if sce_interface["ip_address"]:
+                                iface["ip_address"] = sce_interface["ip_address"]
+                            break
+            #nets    every net of a vms
+            cmd = "SELECT uuid,name,type,description, osm_id FROM nets WHERE vnf_id='{}'".format(vnf['vnf_id'])
+            self.logger.debug(cmd)
+            self.cur.execute(cmd)
+            vnf['nets'] = self.cur.fetchall()
+            for vnf_net in vnf['nets']:
+                SELECT_ = "ip_version,subnet_address,gateway_address,dns_address,dhcp_enabled,dhcp_start_address,dhcp_count"
+                cmd = "SELECT {} FROM ip_profiles WHERE net_id='{}'".format(SELECT_,vnf_net['uuid'])
+                self.logger.debug(cmd)
+                self.cur.execute(cmd)
+                ipprofiles = self.cur.fetchall()
+                if self.cur.rowcount==1:
+                    vnf_net["ip_profile"] = ipprofiles[0]
+                elif self.cur.rowcount>1:
+                    raise db_base.db_base_Exception("More than one ip-profile found with this criteria: net_id='{}'".format(vnf_net['uuid']), httperrors.Bad_Request)
+
+        #sce_nets
+        cmd = "SELECT uuid,name,type,external,description,vim_network_name, osm_id" \
+                " FROM sce_nets  WHERE scenario_id='{}'" \
+                " ORDER BY created_at ".format(scenario_dict['uuid'])
+        self.logger.debug(cmd)
+        self.cur.execute(cmd)
+        scenario_dict['nets'] = self.cur.fetchall()
+        #datacenter_nets
+        for net in scenario_dict['nets']:
+            if str(net['external']) == 'false':
+                SELECT_ = "ip_version,subnet_address,gateway_address,dns_address,dhcp_enabled,dhcp_start_address,dhcp_count"
+                cmd = "SELECT {} FROM ip_profiles WHERE sce_net_id='{}'".format(SELECT_,net['uuid'])
+                self.logger.debug(cmd)
+                self.cur.execute(cmd)
+                ipprofiles = self.cur.fetchall()
+                if self.cur.rowcount==1:
+                    net["ip_profile"] = ipprofiles[0]
+                elif self.cur.rowcount>1:
+                    raise db_base.db_base_Exception("More than one ip-profile found with this criteria: sce_net_id='{}'".format(net['uuid']), httperrors.Bad_Request)
+                continue
+            WHERE_=" WHERE name='{}'".format(net['name'])
+            if datacenter_id!=None:
+                WHERE_ += " AND datacenter_id='{}'".format(datacenter_id)
+            cmd = "SELECT vim_net_id FROM datacenter_nets" + WHERE_
+            self.logger.debug(cmd)
+            self.cur.execute(cmd)
+            d_net = self.cur.fetchone()
+            if d_net==None or datacenter_vim_id==None:
+                #print "nfvo_db.get_scenario() WARNING external net %s not found"  % net['name']
+                net['vim_id']=None
+            else:
+                net['vim_id']=d_net['vim_net_id']
+
+        db_base._convert_datetime2str(scenario_dict)
+        db_base._convert_str2boolean(scenario_dict, ('public','shared','external','port-security','floating-ip') )
+
+        #forwarding graphs
+        cmd = "SELECT uuid,name,description,vendor FROM sce_vnffgs WHERE scenario_id='{}' "\
+                "ORDER BY created_at".format(scenario_dict['uuid'])
+        self.logger.debug(cmd)
+        self.cur.execute(cmd)
+        scenario_dict['vnffgs'] = self.cur.fetchall()
+        for vnffg in scenario_dict['vnffgs']:
+            cmd = "SELECT uuid,name FROM sce_rsps WHERE sce_vnffg_id='{}' "\
+                    "ORDER BY created_at".format(vnffg['uuid'])
+            self.logger.debug(cmd)
+            self.cur.execute(cmd)
+            vnffg['rsps'] = self.cur.fetchall()
+            for rsp in vnffg['rsps']:
+                cmd = "SELECT uuid,if_order,ingress_interface_id,egress_interface_id,sce_vnf_id " \
+                        "FROM sce_rsp_hops WHERE sce_rsp_id='{}' "\
+                        "ORDER BY created_at".format(rsp['uuid'])
+                self.logger.debug(cmd)
+                self.cur.execute(cmd)
+                rsp['connection_points'] = self.cur.fetchall();
+                cmd = "SELECT uuid,name,sce_vnf_id,interface_id FROM sce_classifiers WHERE sce_vnffg_id='{}' "\
+                        "AND sce_rsp_id='{}' ORDER BY created_at".format(vnffg['uuid'], rsp['uuid'])
+                self.logger.debug(cmd)
+                self.cur.execute(cmd)
+                rsp['classifier'] = self.cur.fetchone();
+                cmd = "SELECT uuid,ip_proto,source_ip,destination_ip,source_port,destination_port FROM sce_classifier_matches "\
+                        "WHERE sce_classifier_id='{}' ORDER BY created_at".format(rsp['classifier']['uuid'])
+                self.logger.debug(cmd)
+                self.cur.execute(cmd)
+                rsp['classifier']['matches'] = self.cur.fetchall()
+
+        return scenario_dict
+
+    @retry(command="delete", extra="instances running")
+    @with_transaction(cursor='dict')
+    def delete_scenario(self, scenario_id, tenant_id=None):
+        '''Deletes a scenario, filtering by one or several of the tenant, uuid or name
+        scenario_id is the uuid or the name if it is not a valid uuid format
+        Only one scenario must mutch the filtering or an error is returned
+        '''
+        #scenario table
+        where_text = "uuid='{}'".format(scenario_id)
+        if not tenant_id and tenant_id != "any":
+            where_text += " AND (tenant_id='{}' OR public='True')".format(tenant_id)
+        cmd = "SELECT * FROM scenarios WHERE "+ where_text
+        self.logger.debug(cmd)
+        self.cur.execute(cmd)
+        rows = self.cur.fetchall()
+        if self.cur.rowcount==0:
+            raise db_base.db_base_Exception("No scenario found where " + where_text, httperrors.Not_Found)
+        elif self.cur.rowcount>1:
+            raise db_base.db_base_Exception("More than one scenario found where " + where_text, httperrors.Conflict)
+        scenario_uuid = rows[0]["uuid"]
+        scenario_name = rows[0]["name"]
+
+        #sce_vnfs
+        cmd = "DELETE FROM scenarios WHERE uuid='{}'".format(scenario_uuid)
+        self.logger.debug(cmd)
+        self.cur.execute(cmd)
+
+        return scenario_uuid + " " + scenario_name
+
+    @retry
+    @with_transaction
+    def new_rows(self, tables, uuid_list=None, confidential_data=False, attempt=_ATTEMPT):
+        """
+        Make a transactional insertion of rows at several tables. Can be also a deletion
+        :param tables: list with dictionary where the keys are the table names and the values are a row or row list
+            with the values to be inserted at the table. Each row is a dictionary with the key values. E.g.:
+            tables = [
+                {"table1": [ {"column1": value, "column2: value, ... }, {"column1": value, "column2: value, ... }, ...],
+                {"table2": [ {"column1": value, "column2: value, ... }, {"column1": value, "column2: value, ... }, ...],
+                {"table3": {"column1": value, "column2: value, ... }
+            }
+            If tables does not contain the 'created_at', it is generated incrementally with the order of tables. You can
+            provide a integer value, that it is an index multiply by 0.00001 to add to the created time to manually set
+            up and order
+            If dict contains {"TO-DELETE": uuid} the entry is deleted if exist instead of inserted
+        :param uuid_list: list of created uuids, first one is the root (#TODO to store at uuid table)
+        :return: None if success,  raise exception otherwise
+        """
+        # NOTE(review): uuid_list is currently unused in this body -- see the TODO in the docstring
+        table_name = None
+        created_time = time.time()
+        for table in tables:
+            for table_name, row_list in table.items():
+                index = 0
+                # record the table being processed (presumably consumed by the @retry decorator
+                # for error reporting -- TODO confirm)
+                attempt.info['table'] = table_name
+                if isinstance(row_list, dict):
+                    row_list = (row_list, )  #create a list with the single value
+                for row in row_list:
+                    if "TO-DELETE" in row:
+                        self._delete_row_by_id_internal(table_name, row["TO-DELETE"])
+                        continue
+                    if table_name in self.tables_with_created_field:
+                        # stagger created_at by 10us steps so the insertion order is preserved when
+                        # sorting by it; an explicit "created_at" in the row adds an extra offset
+                        if "created_at" in row:
+                            created_time_param = created_time + (index + row.pop("created_at"))*0.00001
+                        else:
+                            created_time_param = created_time + index*0.00001
+                        index += 1
+                    else:
+                        created_time_param = 0
+                    self._new_row_internal(table_name, row, add_uuid=False, root_uuid=None,
+                                            confidential_data=confidential_data,
+                                            created_time=created_time_param)
+
+    @retry
+    @with_transaction
+    def new_instance_scenario_as_a_whole(self,tenant_id,instance_scenario_name,instance_scenario_description,scenarioDict):
+        """Create an instance_scenario and all its child rows in a single transaction.
+
+        Inserts instance_scenarios, instance_nets (inter and intra VNF) with their ip_profiles,
+        instance_vnfs, instance_vms and instance_interfaces, walking the scenario description.
+        The scenario uuids inside scenarioDict are overwritten in place with the created
+        instance uuids as the structure is walked.
+        :param tenant_id: tenant owning the instance
+        :param instance_scenario_name: name for the new instance
+        :param instance_scenario_description: description for the new instance
+        :param scenarioDict: scenario structure (as built from get_scenario) plus
+            'datacenter_id' and 'datacenter2tenant' entries
+        :return: uuid of the created instance_scenario
+        """
+        created_time = time.time()
+        #instance_scenarios
+        datacenter_id = scenarioDict['datacenter_id']
+        INSERT_={'tenant_id': tenant_id,
+            'datacenter_tenant_id': scenarioDict["datacenter2tenant"][datacenter_id],
+            'name': instance_scenario_name,
+            'description': instance_scenario_description,
+            'scenario_id' : scenarioDict['uuid'],
+            'datacenter_id': datacenter_id
+        }
+        if scenarioDict.get("cloud-config"):
+            INSERT_["cloud_config"] = yaml.safe_dump(scenarioDict["cloud-config"], default_flow_style=True, width=256)
+
+        instance_uuid = self._new_row_internal('instance_scenarios', INSERT_, add_uuid=True, root_uuid=None, created_time=created_time)
+
+        # maps sce_net uuid -> {datacenter_site_id: instance_net uuid}
+        net_scene2instance={}
+        #instance_nets   #nets interVNF
+        for net in scenarioDict['nets']:
+            net_scene2instance[ net['uuid'] ] ={}
+            datacenter_site_id = net.get('datacenter_id', datacenter_id)
+            if not "vim_id_sites" in net:
+                net["vim_id_sites"] ={datacenter_site_id: net['vim_id']}
+                # NOTE(review): literal key "datacenter_site_id" looks suspicious here (a variable
+                # of the same name exists on the previous line) -- TODO confirm intended key
+                net["vim_id_sites"]["datacenter_site_id"] = {datacenter_site_id: net['vim_id']}
+            sce_net_id = net.get("uuid")
+
+            for datacenter_site_id,vim_id in net["vim_id_sites"].items():
+                INSERT_={'vim_net_id': vim_id, 'created': net.get('created', False), 'instance_scenario_id':instance_uuid } #,  'type': net['type']
+                INSERT_['datacenter_id'] = datacenter_site_id
+                INSERT_['datacenter_tenant_id'] = scenarioDict["datacenter2tenant"][datacenter_site_id]
+                if not net.get('created', False):
+                    INSERT_['status'] = "ACTIVE"
+                if sce_net_id:
+                    INSERT_['sce_net_id'] = sce_net_id
+                created_time += 0.00001
+                instance_net_uuid =  self._new_row_internal('instance_nets', INSERT_, True, instance_uuid, created_time)
+                net_scene2instance[ sce_net_id ][datacenter_site_id] = instance_net_uuid
+                net['uuid'] = instance_net_uuid  #overwrite scnario uuid by instance uuid
+
+            # NOTE(review): when the net spans several sites this links the ip_profile to the
+            # instance_net of the last iterated site only -- TODO confirm that is intended
+            if 'ip_profile' in net:
+                net['ip_profile']['net_id'] = None
+                net['ip_profile']['sce_net_id'] = None
+                net['ip_profile']['instance_net_id'] = instance_net_uuid
+                created_time += 0.00001
+                ip_profile_id = self._new_row_internal('ip_profiles', net['ip_profile'])
+
+        #instance_vnfs
+        for vnf in scenarioDict['vnfs']:
+            datacenter_site_id = vnf.get('datacenter_id', datacenter_id)
+            INSERT_={'instance_scenario_id': instance_uuid,  'vnf_id': vnf['vnf_id']  }
+            INSERT_['datacenter_id'] = datacenter_site_id
+            INSERT_['datacenter_tenant_id'] = scenarioDict["datacenter2tenant"][datacenter_site_id]
+            if vnf.get("uuid"):
+                INSERT_['sce_vnf_id'] = vnf['uuid']
+            created_time += 0.00001
+            instance_vnf_uuid =  self._new_row_internal('instance_vnfs', INSERT_, True, instance_uuid, created_time)
+            vnf['uuid'] = instance_vnf_uuid  #overwrite scnario uuid by instance uuid
+
+            #instance_nets   #nets intraVNF
+            for net in vnf['nets']:
+                net_scene2instance[ net['uuid'] ] = {}
+                INSERT_={'vim_net_id': net['vim_id'], 'created': net.get('created', False), 'instance_scenario_id':instance_uuid  } #,  'type': net['type']
+                INSERT_['datacenter_id'] = net.get('datacenter_id', datacenter_site_id)
+                INSERT_['datacenter_tenant_id'] = scenarioDict["datacenter2tenant"][datacenter_id]
+                if net.get("uuid"):
+                    INSERT_['net_id'] = net['uuid']
+                created_time += 0.00001
+                instance_net_uuid =  self._new_row_internal('instance_nets', INSERT_, True, instance_uuid, created_time)
+                net_scene2instance[ net['uuid'] ][datacenter_site_id] = instance_net_uuid
+                net['uuid'] = instance_net_uuid  #overwrite scnario uuid by instance uuid
+
+                if 'ip_profile' in net:
+                    net['ip_profile']['net_id'] = None
+                    net['ip_profile']['sce_net_id'] = None
+                    net['ip_profile']['instance_net_id'] = instance_net_uuid
+                    created_time += 0.00001
+                    ip_profile_id = self._new_row_internal('ip_profiles', net['ip_profile'])
+
+            #instance_vms
+            for vm in vnf['vms']:
+                INSERT_={'instance_vnf_id': instance_vnf_uuid,  'vm_id': vm['uuid'], 'vim_vm_id': vm['vim_id']  }
+                created_time += 0.00001
+                instance_vm_uuid =  self._new_row_internal('instance_vms', INSERT_, True, instance_uuid, created_time)
+                vm['uuid'] = instance_vm_uuid  #overwrite scnario uuid by instance uuid
+
+                #instance_interfaces
+                for interface in vm['interfaces']:
+                    net_id = interface.get('net_id', None)
+                    if net_id is None:
+                        #check if is connected to a inter VNFs net
+                        for iface in vnf['interfaces']:
+                            if iface['interface_id'] == interface['uuid']:
+                                if 'ip_address' in iface:
+                                    interface['ip_address'] = iface['ip_address']
+                                net_id = iface.get('sce_net_id', None)
+                                break
+                    if net_id is None:
+                        # interface not connected to any net: nothing to insert
+                        continue
+                    interface_type='external' if interface['external_name'] is not None else 'internal'
+                    INSERT_={'instance_vm_id': instance_vm_uuid,  'instance_net_id': net_scene2instance[net_id][datacenter_site_id],
+                        'interface_id': interface['uuid'], 'vim_interface_id': interface.get('vim_id'), 'type':  interface_type,
+                        'ip_address': interface.get('ip_address'), 'floating_ip': int(interface.get('floating-ip',False)),
+                        'port_security': int(interface.get('port-security',True))}
+                    #created_time += 0.00001
+                    interface_uuid =  self._new_row_internal('instance_interfaces', INSERT_, True, instance_uuid) #, created_time)
+                    interface['uuid'] = interface_uuid  #overwrite scnario uuid by instance uuid
+        return instance_uuid
+
+    @retry
+    @with_transaction(cursor='dict')
+    def get_instance_scenario(self, instance_id, tenant_id=None, verbose=False):
+        """Obtain the instance information, filtering by one or several of the tenant, uuid or name.
+
+        instance_id is the uuid or the name if it is not a valid uuid format.
+        Only one instance must match the filtering or an error is raised.
+        :param instance_id: instance uuid or name
+        :param tenant_id: if provided, restrict the search to that tenant
+        :param verbose: when False the interface "type" field is stripped from the result
+        :return: instance dictionary with vnfs/vms/interfaces, nets, sfps, sfs, sfis and
+            classifications
+        :raises db_base.db_base_Exception: when zero or more than one instance matches
+        """
+        # instance table
+        where_list = []
+        if tenant_id:
+            where_list.append("inst.tenant_id='{}'".format(tenant_id))
+        if db_base._check_valid_uuid(instance_id):
+            where_list.append("inst.uuid='{}'".format(instance_id))
+        else:
+            where_list.append("inst.name='{}'".format(instance_id))
+        where_text = " AND ".join(where_list)
+        cmd = "SELECT inst.uuid as uuid, inst.name as name, inst.scenario_id as scenario_id, datacenter_id"\
+                    " ,datacenter_tenant_id, s.name as scenario_name,inst.tenant_id as tenant_id" \
+                    " ,inst.description as description, inst.created_at as created_at" \
+                    " ,inst.cloud_config as cloud_config, s.osm_id as nsd_osm_id" \
+                " FROM instance_scenarios as inst left join scenarios as s on inst.scenario_id=s.uuid" \
+                " WHERE " + where_text
+        self.logger.debug(cmd)
+        self.cur.execute(cmd)
+        rows = self.cur.fetchall()
+
+        if self.cur.rowcount == 0:
+            raise db_base.db_base_Exception("No instance found where " + where_text, httperrors.Not_Found)
+        elif self.cur.rowcount > 1:
+            raise db_base.db_base_Exception("More than one instance found where " + where_text,
+                                            httperrors.Bad_Request)
+        instance_dict = rows[0]
+        if instance_dict["cloud_config"]:
+            instance_dict["cloud-config"] = yaml.load(instance_dict["cloud_config"], Loader=yaml.Loader)
+        del instance_dict["cloud_config"]
+
+        # instance_vnfs
+        cmd = "SELECT iv.uuid as uuid, iv.vnf_id as vnf_id, sv.name as vnf_name, sce_vnf_id, datacenter_id"\
+                ", datacenter_tenant_id, v.mgmt_access, sv.member_vnf_index, v.osm_id as vnfd_osm_id "\
+                "FROM instance_vnfs as iv left join sce_vnfs as sv "\
+                " on iv.sce_vnf_id=sv.uuid join vnfs as v on iv.vnf_id=v.uuid " \
+                "WHERE iv.instance_scenario_id='{}' " \
+                "ORDER BY iv.created_at ".format(instance_dict['uuid'])
+        self.logger.debug(cmd)
+        self.cur.execute(cmd)
+        instance_dict['vnfs'] = self.cur.fetchall()
+        for vnf in instance_dict['vnfs']:
+            # mgmt_access may pin the management interface/vm and a fixed ip for this vnf
+            vnf["ip_address"] = None
+            vnf_mgmt_access_iface = None
+            vnf_mgmt_access_vm = None
+            if vnf["mgmt_access"]:
+                vnf_mgmt_access = yaml.load(vnf["mgmt_access"], Loader=yaml.Loader)
+                vnf_mgmt_access_iface = vnf_mgmt_access.get("interface_id")
+                vnf_mgmt_access_vm = vnf_mgmt_access.get("vm_id")
+                vnf["ip_address"] = vnf_mgmt_access.get("ip-address")
+
+            # instance vms
+            cmd = "SELECT iv.uuid as uuid, vim_vm_id, status, error_msg, vim_info, iv.created_at as "\
+                    "created_at, name, vms.osm_id as vdu_osm_id, vim_name, vms.uuid as vm_uuid, related"\
+                    " FROM instance_vms as iv join vms on iv.vm_id=vms.uuid "\
+                    " WHERE instance_vnf_id='{}' ORDER BY iv.created_at".format(vnf['uuid'])
+            self.logger.debug(cmd)
+            self.cur.execute(cmd)
+            vnf['vms'] = self.cur.fetchall()
+            for vm in vnf['vms']:
+                # collect the ip addresses of this vm's mgmt-type interfaces
+                vm_manage_iface_list=[]
+                # instance_interfaces
+                cmd = "SELECT vim_interface_id, instance_net_id, internal_name,external_name, mac_address,"\
+                        " ii.ip_address as ip_address, vim_info, i.type as type, sdn_port_id, i.uuid"\
+                        " FROM instance_interfaces as ii join interfaces as i on ii.interface_id=i.uuid"\
+                        " WHERE instance_vm_id='{}' ORDER BY created_at".format(vm['uuid'])
+                self.logger.debug(cmd)
+                self.cur.execute(cmd )
+                vm['interfaces'] = self.cur.fetchall()
+                for iface in vm['interfaces']:
+                    # the designated mgmt interface provides the vnf ip when none was fixed
+                    if vnf_mgmt_access_iface and vnf_mgmt_access_iface == iface["uuid"]:
+                        if not vnf["ip_address"]:
+                            vnf["ip_address"] = iface["ip_address"]
+                    if iface["type"] == "mgmt" and iface["ip_address"]:
+                        vm_manage_iface_list.append(iface["ip_address"])
+                    if not verbose:
+                        del iface["type"]
+                    del iface["uuid"]
+                if vm_manage_iface_list:
+                    vm["ip_address"] = ",".join(vm_manage_iface_list)
+                    if not vnf["ip_address"] and vnf_mgmt_access_vm == vm["vm_uuid"]:
+                        vnf["ip_address"] = vm["ip_address"]
+                del vm["vm_uuid"]
+
+        #instance_nets
+        #select_text = "instance_nets.uuid as uuid,sce_nets.name as net_name,instance_nets.vim_net_id as net_id,instance_nets.status as status,instance_nets.external as external"
+        #from_text = "instance_nets join instance_scenarios on instance_nets.instance_scenario_id=instance_scenarios.uuid " + \
+        #            "join sce_nets on instance_scenarios.scenario_id=sce_nets.scenario_id"
+        #where_text = "instance_nets.instance_scenario_id='"+ instance_dict['uuid'] + "'"
+        cmd = "SELECT inets.uuid as uuid,vim_net_id,status,error_msg,vim_info,created, sce_net_id, " \
+                "net_id as vnf_net_id, datacenter_id, datacenter_tenant_id, sdn_net_id, " \
+                "snets.osm_id as ns_net_osm_id, nets.osm_id as vnf_net_osm_id, inets.vim_name, related " \
+                "FROM instance_nets as inets left join sce_nets as snets on inets.sce_net_id=snets.uuid " \
+                "left join nets on inets.net_id=nets.uuid " \
+                "WHERE instance_scenario_id='{}' ORDER BY inets.created_at".format(instance_dict['uuid'])
+        self.logger.debug(cmd)
+        self.cur.execute(cmd)
+        instance_dict['nets'] = self.cur.fetchall()
+
+        #instance_sfps
+        cmd = "SELECT uuid,vim_sfp_id,sce_rsp_id,datacenter_id,"\
+                "datacenter_tenant_id,status,error_msg,vim_info, related"\
+                " FROM instance_sfps" \
+                " WHERE instance_scenario_id='{}' ORDER BY created_at".format(instance_dict['uuid'])
+        self.logger.debug(cmd)
+        self.cur.execute(cmd)
+        instance_dict['sfps'] = self.cur.fetchall()
+
+        # for sfp in instance_dict['sfps']:
+        #instance_sfs
+        cmd = "SELECT uuid,vim_sf_id,sce_rsp_hop_id,datacenter_id,"\
+                "datacenter_tenant_id,status,error_msg,vim_info, related"\
+                " FROM instance_sfs" \
+                " WHERE instance_scenario_id='{}' ORDER BY created_at".format(instance_dict['uuid']) # TODO: replace instance_scenario_id with instance_sfp_id
+        self.logger.debug(cmd)
+        self.cur.execute(cmd)
+        instance_dict['sfs'] = self.cur.fetchall()
+
+        #for sf in instance_dict['sfs']:
+        #instance_sfis
+        cmd = "SELECT uuid,vim_sfi_id,sce_rsp_hop_id,datacenter_id,"\
+                "datacenter_tenant_id,status,error_msg,vim_info, related"\
+                " FROM instance_sfis" \
+                " WHERE instance_scenario_id='{}' ORDER BY created_at".format(instance_dict['uuid']) # TODO: replace instance_scenario_id with instance_sf_id
+        self.logger.debug(cmd)
+        self.cur.execute(cmd)
+        instance_dict['sfis'] = self.cur.fetchall()
+#                            for sfi in instance_dict['sfi']:
+
+        #instance_classifications
+        cmd = "SELECT uuid,vim_classification_id,sce_classifier_match_id,datacenter_id,"\
+                "datacenter_tenant_id,status,error_msg,vim_info, related"\
+                " FROM instance_classifications" \
+                " WHERE instance_scenario_id='{}' ORDER BY created_at".format(instance_dict['uuid'])
+        self.logger.debug(cmd)
+        self.cur.execute(cmd)
+        instance_dict['classifications'] = self.cur.fetchall()
+#                    for classification in instance_dict['classifications']
+
+        db_base._convert_datetime2str(instance_dict)
+        db_base._convert_str2boolean(instance_dict, ('public','shared','created') )
+        return instance_dict
+
+    @retry(command='delete', extra='No dependences can avoid deleting!!!!')
+    @with_transaction(cursor='dict')
+    def delete_instance_scenario(self, instance_id, tenant_id=None):
+        '''Deletes a instance_Scenario, filtering by one or several of the tenant, uuid or name
+        instance_id is the uuid or the name if it is not a valid uuid format
+        Only one instance_scenario must mutch the filtering or an error is returned
+        '''
+        #instance table
+        where_list=[]
+        if tenant_id is not None: where_list.append( "tenant_id='" + tenant_id +"'" )
+        if db_base._check_valid_uuid(instance_id):
+            where_list.append( "uuid='" + instance_id +"'" )
+        else:
+            where_list.append( "name='" + instance_id +"'" )
+        where_text = " AND ".join(where_list)
+        cmd = "SELECT * FROM instance_scenarios WHERE "+ where_text
+        self.logger.debug(cmd)
+        self.cur.execute(cmd)
+        rows = self.cur.fetchall()
+
+        if self.cur.rowcount==0:
+            raise db_base.db_base_Exception("No instance found where " + where_text, httperrors.Bad_Request)
+        elif self.cur.rowcount>1:
+            raise db_base.db_base_Exception("More than one instance found where " + where_text, httperrors.Bad_Request)
+        instance_uuid = rows[0]["uuid"]
+        instance_name = rows[0]["name"]
+
+        #sce_vnfs
+        cmd = "DELETE FROM instance_scenarios WHERE uuid='{}'".format(instance_uuid)
+        self.logger.debug(cmd)
+        self.cur.execute(cmd)
+
+        return instance_uuid + " " + instance_name
+
+    @retry(table='instance_scenarios')
+    @with_transaction
+    def new_instance_scenario(self, instance_scenario_dict, tenant_id):
+        """Insert one row in instance_scenarios; returns whatever _new_row_internal returns."""
+        # NOTE(review): tenant_id is passed positionally while other call sites in this file pass
+        # add_uuid as the third positional argument -- verify _new_row_internal's signature
+        # accepts (table, row, tenant_id, ...) together with the add_uuid keyword
+        #return self.new_row('vnfs', vnf_dict, None, tenant_id, True, True)
+        return self._new_row_internal('instance_scenarios', instance_scenario_dict, tenant_id, add_uuid=True, root_uuid=None, log=True)
+
+    def update_instance_scenario(self, instance_scenario_dict):
+        """Placeholder: updating an instance scenario is not implemented yet."""
+        #TODO:
+        return
+
+    @retry(table='instance_vnfs')
+    @with_transaction
+    def new_instance_vnf(self, instance_vnf_dict, tenant_id, instance_scenario_id = None):
+        """Insert one row in instance_vnfs, rooted at instance_scenario_id."""
+        # NOTE(review): tenant_id is passed positionally -- verify _new_row_internal's signature
+        # (other call sites pass add_uuid as the third positional argument)
+        #return self.new_row('vms', vm_dict, tenant_id, True, True)
+        return self._new_row_internal('instance_vnfs', instance_vnf_dict, tenant_id, add_uuid=True, root_uuid=instance_scenario_id, log=True)
+
+    def update_instance_vnf(self, instance_vnf_dict):
+        """Placeholder: updating an instance vnf is not implemented yet."""
+        #TODO:
+        return
+
+    def delete_instance_vnf(self, instance_vnf_id):
+        """Placeholder: deleting an instance vnf is not implemented yet."""
+        #TODO:
+        return
+
+    @retry(table='instance_vms')
+    @with_transaction
+    def new_instance_vm(self, instance_vm_dict, tenant_id, instance_scenario_id = None):
+        """Insert one row in instance_vms, rooted at instance_scenario_id."""
+        # NOTE(review): tenant_id is passed positionally -- verify _new_row_internal's signature
+        #return self.new_row('vms', vm_dict, tenant_id, True, True)
+        return self._new_row_internal('instance_vms', instance_vm_dict, tenant_id, add_uuid=True, root_uuid=instance_scenario_id, log=True)
+
+    def update_instance_vm(self, instance_vm_dict):
+        """Placeholder: updating an instance vm is not implemented yet."""
+        #TODO:
+        return
+
+    def delete_instance_vm(self, instance_vm_id):
+        """Placeholder: deleting an instance vm is not implemented yet."""
+        #TODO:
+        return
+
+    @retry(table='instance_nets')
+    @with_transaction
+    def new_instance_net(self, instance_net_dict, tenant_id, instance_scenario_id = None):
+        """Insert one row in instance_nets, rooted at instance_scenario_id."""
+        # NOTE(review): tenant_id is passed positionally -- verify _new_row_internal's signature
+        return self._new_row_internal('instance_nets', instance_net_dict, tenant_id, add_uuid=True, root_uuid=instance_scenario_id, log=True)
+
+    def update_instance_net(self, instance_net_dict):
+        """Placeholder: updating an instance net is not implemented yet."""
+        #TODO:
+        return
+
+    def delete_instance_net(self, instance_net_id):
+        """Placeholder: deleting an instance net is not implemented yet."""
+        #TODO:
+        return
+
+    @retry(table='instance_interfaces')
+    @with_transaction
+    def new_instance_interface(self, instance_interface_dict, tenant_id, instance_scenario_id = None):
+        """Insert one row in instance_interfaces, rooted at instance_scenario_id."""
+        # NOTE(review): tenant_id is passed positionally -- verify _new_row_internal's signature
+        return self._new_row_internal('instance_interfaces', instance_interface_dict, tenant_id, add_uuid=True, root_uuid=instance_scenario_id, log=True)
+
+    def update_instance_interface(self, instance_interface_dict):
+        """Placeholder: updating an instance interface is not implemented yet."""
+        #TODO:
+        return
+
+    def delete_instance_interface(self, instance_interface_dict):
+        """Placeholder: deleting an instance interface is not implemented yet."""
+        #TODO:
+        return
+
+    @retry(table='datacenter_nets')
+    @with_transaction
+    def update_datacenter_nets(self, datacenter_id, new_net_list=[]):
+        ''' Removes the old and adds the new net list at datacenter list for one datacenter.
+        Attribute
+            datacenter_id: uuid of the datacenter to act upon
+            table: table where to insert
+            new_net_list: the new values to be inserted. If empty it only deletes the existing nets
+        Return: (Inserted items, Deleted items) if OK, (-Error, text) if error
+        '''
+        created_time = time.time()
+        cmd="DELETE FROM datacenter_nets WHERE datacenter_id='{}'".format(datacenter_id)
+        self.logger.debug(cmd)
+        self.cur.execute(cmd)
+        deleted = self.cur.rowcount
+        inserted = 0
+        for new_net in new_net_list:
+            created_time += 0.00001
+            self._new_row_internal('datacenter_nets', new_net, add_uuid=True, created_time=created_time)
+            inserted += 1
+        return inserted, deleted
diff --git a/RO/osm_ro/openmano_schemas.py b/RO/osm_ro/openmano_schemas.py
new file mode 100644 (file)
index 0000000..8fd2889
--- /dev/null
@@ -0,0 +1,1268 @@
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+'''
+JSON schemas used by openmano httpserver.py module to parse the different files and messages sent through the API
+'''
+__author__="Alfonso Tierno, Gerardo Garcia, Pablo Montes"
+__date__ ="$09-oct-2014 09:09:48$"
+
+#Basis schemas
+patern_name="^[ -~]+$"
+passwd_schema={"type" : "string", "minLength":1, "maxLength":60}
+nameshort_schema={"type" : "string", "minLength":1, "maxLength":60, "pattern" : "^[^,;()'\"]+$"}
+name_schema={"type" : "string", "minLength":1, "maxLength":255, "pattern" : "^[^,;()'\"]+$"}
+xml_text_schema={"type" : "string", "minLength":1, "maxLength":1000, "pattern" : "^[^']+$"}
+description_schema={"type" : ["string","null"], "maxLength":255, "pattern" : "^[^'\"]+$"}
+id_schema_fake = {"type" : "string", "minLength":2, "maxLength":36 }  #"pattern": "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$"
+id_schema = {"type" : "string", "pattern": "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$"}
+pci_schema={"type":"string", "pattern":"^[0-9a-fA-F]{4}(:[0-9a-fA-F]{2}){2}\.[0-9a-fA-F]$"}
+# allows [] for wildcards. For that reason huge length limit is set
+pci_extended_schema = {"type": "string", "pattern": "^[0-9a-fA-F.:-\[\]]{12,40}$"}
+
+http_schema={"type":"string", "pattern":"^https?://[^'\"=]+$"}
+bandwidth_schema={"type":"string", "pattern" : "^[0-9]+ *([MG]bps)?$"}
+memory_schema={"type":"string", "pattern" : "^[0-9]+ *([MG]i?[Bb])?$"}
+integer0_schema={"type":"integer","minimum":0}
+integer1_schema={"type":"integer","minimum":1}
+path_schema={"type":"string", "pattern":"^(\.){0,2}(/[^/\"':{}\(\)]+)+$"}
+vlan_schema={"type":"integer","minimum":1,"maximum":4095}
+vlan1000_schema={"type":"integer","minimum":1000,"maximum":4095}
+mac_schema={"type":"string", "pattern":"^[0-9a-fA-F][02468aceACE](:[0-9a-fA-F]{2}){5}$"}  #must be unicast LSB bit of MSB byte ==0
+#mac_schema={"type":"string", "pattern":"^([0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2}$"}
+ip_schema={"type":"string","pattern":"^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$"}
+ip_prefix_schema={"type":"string","pattern":"^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)/(30|[12]?[0-9])$"}
+port_schema={"type":"integer","minimum":1,"maximum":65534}
+object_schema={"type":"object"}
+schema_version_2={"type":"integer","minimum":2,"maximum":2}
+#schema_version_string={"type":"string","enum": ["0.1", "2", "0.2", "3", "0.3"]}
+log_level_schema={"type":"string", "enum":["DEBUG", "INFO", "WARNING","ERROR","CRITICAL"]}
+checksum_schema={"type":"string", "pattern":"^[0-9a-fA-F]{32}$"}
+size_schema={"type":"integer","minimum":1,"maximum":100}
+boolean_schema = {"type": "boolean"}
+null_schema = {"type": "null"}
+
+metadata_schema={
+    "type":"object",
+    "properties":{
+        "architecture": {"type":"string"},
+        "use_incremental": {"type":"string","enum":["yes","no"]},
+        "vpci": pci_schema,
+        "os_distro": {"type":"string"},
+        "os_type": {"type":"string"},
+        "os_version": {"type":"string"},
+        "bus": {"type":"string"},
+        "topology": {"type":"string", "enum": ["oneSocket"]}
+    }
+}
+
+#Schema for the configuration file
+config_schema = {
+    "title":"configuration response information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "http_port": port_schema,
+        "http_admin_port": port_schema,
+        "http_host": nameshort_schema,
+        "auto_push_VNF_to_VIMs": boolean_schema,
+        "vnf_repository": path_schema,
+        "db_host": nameshort_schema,
+        "db_user": nameshort_schema,
+        "db_passwd": {"type":"string"},
+        "db_name": nameshort_schema,
+        "db_ovim_host": nameshort_schema,
+        "db_ovim_user": nameshort_schema,
+        "db_ovim_passwd": {"type":"string"},
+        "db_ovim_name": nameshort_schema,
+        # Next fields will disappear once the MANO API includes appropriate primitives
+        "vim_url": http_schema,
+        "vim_url_admin": http_schema,
+        "vim_name": nameshort_schema,
+        "vim_tenant_name": nameshort_schema,
+        "mano_tenant_name": nameshort_schema,
+        "mano_tenant_id": id_schema,
+        "http_console_proxy": boolean_schema,
+        "http_console_host": nameshort_schema,
+        "http_console_ports": {
+            "type": "array",
+            "items": {"OneOf": [
+                port_schema,
+                {"type": "object", "properties": {"from": port_schema, "to": port_schema}, "required": ["from", "to"]}
+            ]}
+        },
+        "log_level": log_level_schema,
+        "log_socket_level": log_level_schema,
+        "log_level_db": log_level_schema,
+        "log_level_vim": log_level_schema,
+        "log_level_wim": log_level_schema,
+        "log_level_nfvo": log_level_schema,
+        "log_level_http": log_level_schema,
+        "log_level_console": log_level_schema,
+        "log_level_ovim": log_level_schema,
+        "log_file_db": path_schema,
+        "log_file_vim": path_schema,
+        "log_file_wim": path_schema,
+        "log_file_nfvo": path_schema,
+        "log_file_http": path_schema,
+        "log_file_console": path_schema,
+        "log_file_ovim": path_schema,
+        "log_socket_host": nameshort_schema,
+        "log_socket_port": port_schema,
+        "log_file": path_schema,
+    },
+    "required": ['db_user', 'db_passwd', 'db_name'],
+    "additionalProperties": False
+}
+
+tenant_schema = {
+    "title":"tenant information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "tenant":{
+            "type":"object",
+            "properties":{
+                "name": nameshort_schema,
+                "description": description_schema,
+            },
+            "required": ["name"],
+            "additionalProperties": True
+        }
+    },
+    "required": ["tenant"],
+    "additionalProperties": False
+}
+
+tenant_edit_schema = {
+    "title":"tenant edit information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "tenant":{
+            "type":"object",
+            "properties":{
+                "name": name_schema,
+                "description": description_schema,
+            },
+            "additionalProperties": False
+        }
+    },
+    "required": ["tenant"],
+    "additionalProperties": False
+}
+
+datacenter_schema_properties={
+    "name": name_schema,
+    "description": description_schema,
+    "type": nameshort_schema, #currently "openvim" or "openstack", can be enlarged with plugins
+    "vim_url": description_schema,
+    "vim_url_admin": description_schema,
+    "config": { "type":"object" }
+}
+
+# Request body for datacenter creation ("name" and "vim_url" are mandatory).
+datacenter_schema = {
+    "title":"datacenter information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "datacenter":{
+            "type":"object",
+            "properties":datacenter_schema_properties,
+            "required": ["name", "vim_url"],
+            "additionalProperties": True
+        }
+    },
+    "required": ["datacenter"],
+    "additionalProperties": False
+}
+
+
+# Request body for datacenter edition; all properties are optional.
+datacenter_edit_schema = {
+    "title":"datacenter edit nformation schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "datacenter":{
+            "type":"object",
+            "properties":datacenter_schema_properties,
+            "additionalProperties": False
+        }
+    },
+    "required": ["datacenter"],
+    "additionalProperties": False
+}
+
+
+# Request body for creating a netmap at a datacenter.
+netmap_new_schema = {
+    "title":"netmap new information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "netmap":{   # netmap to create at the datacenter
+            "type":"object",
+            "properties":{
+                "name": name_schema,  # name or uuid of the net
+                "vim_id": id_schema,
+                "vim_name": name_schema
+            },
+            "minProperties": 1,
+            "additionalProperties": False
+        },
+    },
+    "required": ["netmap"],
+    "additionalProperties": False
+}
+
+# Request body for editing an existing netmap of a datacenter.
+netmap_edit_schema = {
+    "title":"netmap edit information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "netmap":{   # netmap fields to change
+            "type":"object",
+            "properties":{
+                "name": name_schema,  # name or uuid of net to change
+            },
+            "minProperties": 1,
+            "additionalProperties": False
+        },
+    },
+    "required": ["netmap"],
+    "additionalProperties": False
+}
+
+# Body of a datacenter action request: exactly one action among
+# check-connectivity, net-update, net-edit and net-delete.
+datacenter_action_schema = {
+    "title":"datacenter action information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "check-connectivity": {"type": "null"},
+        "net-update": {"type": "null"},
+        "net-edit": {
+            "type":"object",
+            "properties":{
+                "net": name_schema,  #name or uuid of net to change
+                "name": name_schema,
+                "description": description_schema,
+                "shared": boolean_schema
+            },
+            "minProperties": 1,
+            "additionalProperties": False
+        },
+        "net-delete":{
+            "type":"object",
+            "properties":{
+                "net": name_schema,  #name or uuid of net to change
+            },
+            "required": ["net"],
+            "additionalProperties": False
+        },
+    },
+    "minProperties": 1,
+    "maxProperties": 1,
+    "additionalProperties": False
+}
+
+
+# Body for associating a datacenter to a tenant, optionally carrying VIM
+# credentials and extra config.
+datacenter_associate_schema={
+    "title":"datacenter associate information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "datacenter":{
+            "type": "object",
+            "properties": {
+                "name": name_schema,
+                "vim_id": id_schema,
+                "vim_tenant": name_schema,
+                "vim_tenant_name": name_schema,
+                "vim_username": nameshort_schema,
+                "vim_password": nameshort_schema,
+                "config": {"type": "object"}
+            },
+            # "required": ["vim_tenant"],
+            "additionalProperties": True
+        }
+    },
+    "required": ["datacenter"],
+    "additionalProperties": False
+}
+
+dhcp_schema = {
+    "title": "DHCP schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type": "object",
+    "properties":{
+        "enabled": boolean_schema,
+        "start-address": {"OneOf": [null_schema, ip_schema]},
+        "count": integer0_schema
+    },
+    # "required": ["start-address", "count"],
+}
+
+# IP parameters (version, subnet, gateway, dns, dhcp) attachable to a network.
+ip_profile_schema = {
+    "title": "IP profile schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type": "object",
+    "properties": {
+        "ip-version": {"type": "string", "enum": ["IPv4", "IPv6"]},
+        "subnet-address": ip_prefix_schema,
+        "gateway-address": ip_schema,
+        "dns-address": {"oneOf": [ip_schema,     # for backward compatibility
+                                  {"type": "array", "items": ip_schema}]},
+        "dhcp": dhcp_schema
+    },
+}
+
+key_pair_schema = {
+    "title": "Key-pair schema for cloud-init configuration schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "name": name_schema,
+        "key": {"type":"string"}
+    },
+    "required": ["key"],
+    "additionalProperties": False
+}
+
+# One user entry for cloud-init; "key-pairs" carries public ssh keys as strings.
+cloud_config_user_schema = {
+    "title": "User schema for cloud-init configuration schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "name": nameshort_schema,
+        "user-info": {"type":"string"},
+        #"key-pairs": {"type" : "array", "items": key_pair_schema}
+        "key-pairs": {"type" : "array", "items": {"type":"string"}}
+    },
+    "required": ["name"],
+    "additionalProperties": False
+}
+
+# Cloud-init configuration: ssh keys and extra users, shared by the VMs it applies to.
+cloud_config_schema = {
+    "title": "Cloud-init configuration schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        #"key-pairs": {"type" : "array", "items": key_pair_schema},
+        "key-pairs": {"type" : "array", "items": {"type":"string"}},
+        "users": {"type" : "array", "items": cloud_config_user_schema}
+    },
+    "additionalProperties": False
+}
+
+# Endpoint (VNFC + local interface name) of an internal connection, v0.1 descriptors.
+internal_connection_element_schema = {
+    "type":"object",
+    "properties":{
+        "VNFC": name_schema,
+        "local_iface_name": name_schema
+    }
+}
+
+# v0.2 endpoint: as v0.1 plus an optional fixed ip_address.
+internal_connection_element_schema_v02 = {
+    "type":"object",
+    "properties":{
+        "VNFC": name_schema,
+        "local_iface_name": name_schema,
+        "ip_address": ip_schema
+    }
+}
+
+# Internal connection among VNFCs of one VNF, v0.1 ("bridge", "data" or "ptp").
+internal_connection_schema = {
+    "type":"object",
+    "properties":{
+        "name": name_schema,
+        "description":description_schema,
+        "type":{"type":"string", "enum":["bridge","data","ptp"]},
+        "elements": {"type" : "array", "items": internal_connection_element_schema, "minItems":1}
+    },
+    "required": ["name", "type", "elements"],
+    "additionalProperties": False
+}
+
+# Internal connection v0.2: e-line/e-lan with overlay/underlay implementation
+# and an optional ip-profile.
+internal_connection_schema_v02 = {
+    "type":"object",
+    "properties":{
+        "name": name_schema,
+        "description":description_schema,
+        "type": {"type": "string", "enum":["e-line", "e-lan"]},
+        "implementation": {"type": "string", "enum":["overlay", "underlay"]},
+        "ip-profile": ip_profile_schema,
+        "elements": {"type" : "array", "items": internal_connection_element_schema_v02, "minItems":1}
+    },
+    "required": ["name", "type", "implementation", "elements"],
+    "additionalProperties": False
+}
+
+# Connection of a VNFC interface to the outside of the VNF, v0.1 descriptors.
+external_connection_schema = {
+    "type":"object",
+    "properties":{
+        "name": name_schema,
+        "type":{"type":"string", "enum":["mgmt","bridge","data"]},
+        "VNFC": name_schema,
+        "local_iface_name": name_schema ,
+        "description":description_schema
+    },
+    "required": ["name", "type", "VNFC", "local_iface_name"],
+    "additionalProperties": False
+}
+
+# Not yet used: v0.2 external connection (e-line/e-lan), kept for future schema versions.
+external_connection_schema_v02 = {
+    "type":"object",
+    "properties":{
+        "name": name_schema,
+        "mgmt": boolean_schema,
+        "type": {"type": "string", "enum":["e-line", "e-lan"]},
+        "implementation": {"type": "string", "enum":["overlay", "underlay"]},
+        "VNFC": name_schema,
+        "local_iface_name": name_schema ,
+        "description":description_schema
+    },
+    "required": ["name", "type", "VNFC", "local_iface_name"],
+    "additionalProperties": False
+}
+
+# Dataplane interfaces of a numa node ("dedicated" covers passthrough and SR-IOV).
+interfaces_schema={
+    "type":"array",
+    "items":{
+        "type":"object",
+        "properties":{
+            "name":name_schema,
+            "dedicated":{"type":"string","enum":["yes","no","yes:sriov"]},
+            "bandwidth":bandwidth_schema,
+            "vpci":pci_schema,
+            "mac_address": mac_schema
+        },
+        "additionalProperties": False,
+        "required": ["name","dedicated", "bandwidth"]
+    }
+}
+
+# Emulated/virtio bridge interfaces of a VNFC.
+bridge_interfaces_schema={
+    "type":"array",
+    "items":{
+        "type":"object",
+        "properties":{
+            "name": name_schema,
+            "bandwidth":bandwidth_schema,
+            "vpci":pci_schema,
+            "mac_address": mac_schema,
+            "model": {"type":"string", "enum":["virtio","e1000","ne2k_pci","pcnet","rtl8139", "paravirt"]},
+            "port-security": boolean_schema,
+            "floating-ip": boolean_schema,
+        },
+        "additionalProperties": False,
+        "required": ["name"]
+    }
+}
+
+# Extra devices attached to a VNFC: disk, cdrom or a raw xml definition.
+devices_schema={
+    "type":"array",
+    "items":{
+        "type":"object",
+        "properties":{
+            "type":{"type":"string", "enum":["disk","cdrom","xml"] },
+            "image": path_schema,
+            "image name": name_schema,
+            "image checksum": checksum_schema,
+            "image metadata": metadata_schema,
+            "size": size_schema,
+            "vpci":pci_schema,
+            "xml":xml_text_schema,
+            "name": name_schema,
+        },
+        "additionalProperties": False,
+        "required": ["type"]
+    }
+}
+
+
+# NUMA node requirements: memory, core/thread pinning and dataplane interfaces.
+numa_schema = {
+    "type": "object",
+    "properties": {
+        "memory":integer1_schema,
+        "cores":integer1_schema,
+        "paired-threads":integer1_schema,
+        "threads":integer1_schema,
+        "cores-id":{"type":"array","items":integer0_schema},
+        "paired-threads-id":{"type":"array","items":{"type":"array","minItems":2,"maxItems":2,"items":integer0_schema}},
+        "threads-id":{"type":"array","items":integer0_schema},
+        "interfaces":interfaces_schema
+    },
+    "additionalProperties": False,
+    #"required": ["memory"]
+}
+
+# A file to be written by cloud-init ("dest" and "content" are mandatory).
+config_files_schema = {
+    "title": "Config files for cloud init schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type": "object",
+    "properties": {
+        "dest": path_schema,
+        "encoding": {"type": "string", "enum": ["b64", "base64", "gz", "gz+b64", "gz+base64", "gzip+b64", "gzip+base64"]},  # by default plain text
+        "content": {"type": "string"},
+        "permissions": {"type": "string"}, # typically octal notation '0644'
+        "owner": {"type": "string"},  # format:   owner:group
+
+    },
+    "additionalProperties": False,
+    "required": ["dest", "content"],
+}
+
+# Per-VDU boot (cloud-init) data: ssh keys, users, raw user-data or config files.
+boot_data_vdu_schema  = {
+    "title": "Boot data (Cloud-init) configuration schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type": "object",
+    "properties":{
+        "key-pairs": {"type" : "array", "items": {"type":"string"}},
+        "users": {"type" : "array", "items": cloud_config_user_schema},
+        "user-data": {"type" : "string"},  # script to run
+        "config-files": {"type": "array", "items": config_files_schema},
+        # NOTE: "user-data" is mutually exclusive with users and config-files because user/files are injected using user-data
+        "boot-data-drive": boolean_schema,
+    },
+    "additionalProperties": False,
+}
+
+# One VM (VNFC) of a VNF: image, compute resources, numa placement and interfaces.
+# Exactly one of "VNFC image" (path/url) or "image name" must be supplied.
+vnfc_schema = {
+    "type":"object",
+    "properties":{
+        "name": name_schema,
+        "description": description_schema,
+        "count": integer1_schema,
+        "image name": name_schema,
+        "availability_zone": name_schema,
+        "VNFC image": {"oneOf": [path_schema, http_schema]},
+        "image checksum": checksum_schema,
+        "image metadata": metadata_schema,
+        #"cloud-config": cloud_config_schema, #common for all vnfs in the scenario
+        "processor": {
+            "type":"object",
+            "properties":{
+                "model":description_schema,
+                "features":{"type":"array","items":nameshort_schema}
+            },
+            "required": ["model"],
+            "additionalProperties": False
+        },
+        "hypervisor": {
+            "type":"object",
+            "properties":{
+                "type":nameshort_schema,
+                "version":description_schema
+            },
+        },
+        "ram":integer0_schema,
+        "vcpus":integer0_schema,
+        "disk": integer1_schema,
+        "numas": {
+            "type": "array",
+            "items": numa_schema
+        },
+        "bridge-ifaces": bridge_interfaces_schema,
+        "devices": devices_schema,
+        "boot-data" : boot_data_vdu_schema
+
+    },
+    "required": ["name"],
+    "oneOf": [
+        {"required": ["VNFC image"]},
+        {"required": ["image name"]}
+    ],
+    "additionalProperties": False
+}
+
+# Full VNF descriptor, schema version 0.1.
+vnfd_schema_v01 = {
+    "title":"vnfd information schema v0.1",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "vnf":{
+            "type":"object",
+            "properties":{
+                "name": name_schema,
+                "description": description_schema,
+
+                "class": nameshort_schema,
+                "public": boolean_schema,
+                "physical": boolean_schema,
+                "default_user": name_schema,
+                "tenant_id": id_schema, #only valid for admin
+                "external-connections": {"type" : "array", "items": external_connection_schema, "minItems":1},
+                "internal-connections": {"type" : "array", "items": internal_connection_schema, "minItems":1},
+                "VNFC":{"type" : "array", "items": vnfc_schema, "minItems":1}
+            },
+            "required": ["name","external-connections"],
+            "additionalProperties": True
+        }
+    },
+    "required": ["vnf"],
+    "additionalProperties": False
+}
+
+# VNFD schema v0.2, used by OSM R1 (requires schema_version "0.2").
+vnfd_schema_v02 = {
+    "title":"vnfd information schema v0.2",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "schema_version": {"type": "string", "enum": ["0.2"]},
+        "vnf":{
+            "type":"object",
+            "properties":{
+                "name": name_schema,
+                "description": description_schema,
+                "class": nameshort_schema,
+                "public": boolean_schema,
+                "physical": boolean_schema,
+                "tenant_id": id_schema, #only valid for admin
+                "external-connections": {"type" : "array", "items": external_connection_schema, "minItems":1},
+                "internal-connections": {"type" : "array", "items": internal_connection_schema_v02, "minItems":1},
+                # "cloud-config": cloud_config_schema, #common for all vnfcs
+                "VNFC":{"type" : "array", "items": vnfc_schema, "minItems":1}
+            },
+            "required": ["name"],
+            "additionalProperties": True
+        }
+    },
+    "required": ["vnf", "schema_version"],
+    "additionalProperties": False
+}
+
+#vnfd_schema = vnfd_schema_v01
+#{
+#    "title":"vnfd information schema v0.2",
+#    "$schema": "http://json-schema.org/draft-04/schema#",
+#    "oneOf": [vnfd_schema_v01, vnfd_schema_v02]
+#}
+
+# Canvas position and interface layout used by the (optional) graphical composer.
+graph_schema = {
+    "title":"graphical scenario descriptor information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "x":      integer0_schema,
+        "y":      integer0_schema,
+        "ifaces": {
+            "type":"object",
+            "properties":{
+                "left": {"type":"array"},
+                "right": {"type":"array"},
+                "bottom": {"type":"array"},
+            }
+        }
+    },
+    "required": ["x","y"]
+}
+
+# Network scenario descriptor v0.1: a topology of VNF/network nodes plus their
+# connections.
+nsd_schema_v01 = {
+    "title":"network scenario descriptor information schema v0.1",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "name":name_schema,
+        "description": description_schema,
+        "tenant_id": id_schema, #only valid for admin
+        "public": boolean_schema,
+        "topology":{
+            "type":"object",
+            "properties":{
+                "nodes": {
+                    "type":"object",
+                    "patternProperties":{
+                        ".": {
+                            "type": "object",
+                            "properties":{
+                                "type":{"type":"string", "enum":["VNF", "other_network", "network", "external_network"]},
+                                "vnf_id": id_schema,
+                                "graph": graph_schema,
+                            },
+                            "patternProperties":{
+                                "^(VNF )?model$": {"type": "string"}
+                            },
+                            "required": ["type"]
+                        }
+                    }
+                },
+                "connections": {
+                    "type":"object",
+                    "patternProperties":{
+                        ".": {
+                            "type": "object",
+                            "properties":{
+                                "nodes":{"oneOf":[{"type":"object", "minProperties":2}, {"type":"array", "minLength":1}]},
+                                "type": {"type": "string", "enum":["link", "external_network", "dataplane_net", "bridge_net"]},
+                                "graph": graph_schema
+                            },
+                            "required": ["nodes"]
+                        },
+                    }
+                }
+            },
+            "required": ["nodes"],
+            "additionalProperties": False
+        }
+    },
+    "required": ["name","topology"],
+    "additionalProperties": False
+}
+
+# Network scenario descriptor v0.2: scenario with vnfs and networks maps.
+nsd_schema_v02 = {
+    "title":"network scenario descriptor information schema v0.2",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "schema_version": schema_version_2,
+        "scenario":{
+            "type":"object",
+            "properties":{
+                "name": name_schema,
+                "description": description_schema,
+                "tenant_id": id_schema, #only valid for admin
+                "public": boolean_schema,
+                "vnfs": {
+                    "type":"object",
+                    "patternProperties":{
+                        ".": {
+                            "type": "object",
+                            "properties":{
+                                "vnf_id": id_schema,
+                                "graph": graph_schema,
+                                "vnf_name": name_schema,
+                            },
+                        }
+                    },
+                    "minProperties": 1
+                },
+                "networks": {
+                    "type":"object",
+                    "patternProperties":{
+                        ".": {
+                            "type": "object",
+                            "properties":{
+                                "interfaces":{"type":"array", "minLength":1},
+                                "type": {"type": "string", "enum":["dataplane", "bridge"]},
+                                "external" : boolean_schema,
+                                "graph": graph_schema
+                            },
+                            "required": ["interfaces"]
+                        },
+                    }
+                },
+
+            },
+            "required": ["vnfs", "name"],
+            "additionalProperties": False
+        }
+    },
+    "required": ["scenario","schema_version"],
+    "additionalProperties": False
+}
+
+# NSD schema v0.3, used by OSM R1: adds cloud-config, ip-profiles and typed
+# e-line/e-lan networks (requires schema_version "0.3").
+nsd_schema_v03 = {
+    "title":"network scenario descriptor information schema v0.3",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "schema_version": {"type": "string", "enum": ["0.3"]},
+        "scenario":{
+            "type":"object",
+            "properties":{
+                "name": name_schema,
+                "description": description_schema,
+                "tenant_id": id_schema, #only valid for admin
+                "public": boolean_schema,
+                "cloud-config": cloud_config_schema, #common for all vnfs in the scenario
+                #"datacenter": name_schema,
+                "vnfs": {
+                    "type":"object",
+                    "patternProperties":{
+                        ".": {
+                            "type": "object",
+                            "properties":{
+                                "vnf_id": id_schema,
+                                "graph": graph_schema,
+                                "vnf_name": name_schema,
+                                #"cloud-config": cloud_config_schema, #particular for a vnf
+                                #"datacenter": name_schema,
+                                "internal-connections": {
+                                    "type": "object",
+                                    "patternProperties": {
+                                        ".": {
+                                            "type": "object",
+                                            "properties": {
+                                                "ip-profile": ip_profile_schema,
+                                                "elements": {
+                                                    "type" : "array",
+                                                    "items":{
+                                                        "type":"object",
+                                                        "properties":{
+                                                            "VNFC": name_schema,
+                                                            "local_iface_name": name_schema,
+                                                            "ip_address": ip_schema,
+                                                        },
+                                                        "required": ["VNFC", "local_iface_name"],
+                                                    }
+                                                }
+                                            }
+                                        }
+                                    }
+                                }
+                            },
+                        }
+                    },
+                    "minProperties": 1
+                },
+                "networks": {
+                    "type":"object",
+                    "patternProperties":{
+                        ".": {
+                            "type": "object",
+                            "properties":{
+                                "interfaces":{
+                                    "type":"array",
+                                    "minLength":1,
+                                    "items":{
+                                        "type":"object",
+                                        "properties":{
+                                            "vnf": name_schema,
+                                            "vnf_interface": name_schema,
+                                            "ip_address": ip_schema
+                                        },
+                                        "required": ["vnf", "vnf_interface"],
+                                    }
+                                },
+                                "type": {"type": "string", "enum":["e-line", "e-lan"]},
+                                "implementation": {"type": "string", "enum":["overlay", "underlay"]},
+                                "external" : boolean_schema,
+                                "graph": graph_schema,
+                                "ip-profile": ip_profile_schema
+                            },
+                            "required": ["interfaces"]
+                        },
+                    }
+                },
+
+            },
+            "required": ["vnfs", "networks","name"],
+            "additionalProperties": False
+        }
+    },
+    "required": ["scenario","schema_version"],
+    "additionalProperties": False
+}
+
+#scenario_new_schema = {
+#    "title":"new scenario information schema",
+#    "$schema": "http://json-schema.org/draft-04/schema#",
+#    #"oneOf": [nsd_schema_v01, nsd_schema_v02]
+#    "oneOf": [nsd_schema_v01]
+#}
+
# Schema used to validate a scenario edition request: only the name, the
# description and the graphical topology information can be changed.
scenario_edit_schema = {
    "title": "edit scenario information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "name": name_schema,
        "description": description_schema,
        "topology": {
            "type": "object",
            "properties": {
                "nodes": {
                    "type": "object",
                    # keys must be the UUIDs of the scenario nodes
                    "patternProperties": {
                        "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$": {
                            "type": "object",
                            "properties": {
                                "graph": {
                                    "type": "object",
                                    "properties": {
                                        "x": integer0_schema,
                                        "y": integer0_schema,
                                        "ifaces": {"type": "object"},
                                    },
                                },
                                "description": description_schema,
                                "name": name_schema,
                            },
                        },
                    },
                },
            },
            "required": ["nodes"],
            "additionalProperties": False,
        },
    },
    "additionalProperties": False,
}
+
# Schema for the actions that can be requested over a scenario.
# "start", "deploy", "reserve" and "verify" all accept exactly the same
# payload, so the four entries are generated from a single template.
scenario_action_schema = {
    "title": "scenario action information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        action: {
            "type": "object",
            "properties": {
                "instance_name": name_schema,
                "description": description_schema,
                "datacenter": {"type": "string"},
            },
            "required": ["instance_name"],
        }
        for action in ("start", "deploy", "reserve", "verify")
    },
    # exactly one action per request
    "minProperties": 1,
    "maxProperties": 1,
    "additionalProperties": False,
}
+
# Minimal inline "scenario" description accepted when creating an instance
# that is not based on any stored nsd: just a list of networks.
instance_scenario_object = {
    "title": "scenario object used to create an instance not based on any nsd",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "nets": {
            "type": "array",
            "minLength": 1,
            "items": {
                "type": "object",
                "properties": {
                    "name": name_schema,
                    "external": boolean_schema,
                    # bridge: overlay; ptp: underlay E-LINE; data: underlay E-LAN
                    "type": {"enum": ["bridge", "ptp", "data"]},
                },
                "additionalProperties": False,
                "required": ["name", "external", "type"],
            },
        },
    },
    "additionalProperties": False,
    "required": ["nets"],
}
+
# Schema to validate the creation of an instance (deployed scenario), v0.1.
# BUGFIX: the "scenario" property used the misspelled keyword "oneOff";
# json-schema draft-04 silently ignores unknown keywords, so that field was
# never actually validated. Fixed to "oneOf".
instance_scenario_create_schema_v01 = {
    "title": "instance scenario create information schema v0.1",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "schema_version": {"type": "string", "enum": ["0.1"]},
        "instance": {
            "type": "object",
            "properties": {
                "mgmt_keys": {"type": "array", "items": {"type": "string"}},
                "vduImage": name_schema,
                "name": name_schema,
                "description": description_schema,
                "datacenter": name_schema,
                "wim_account": {"oneOf": [boolean_schema, id_schema, null_schema]},
                # can be an UUID or name or a dict
                "scenario": {"oneOf": [name_schema, instance_scenario_object]},
                "action": {"enum": ["deploy", "reserve", "verify"]},
                # can be true or a dict with datacenter: net_name
                "connect_mgmt_interfaces": {"oneOf": [boolean_schema, {"type": "object"}]},
                "cloud-config": cloud_config_schema,  # common to all vnfs in the instance scenario
                "vnfs": {  # mapping from scenario to datacenter
                    "type": "object",
                    "patternProperties": {
                        ".": {
                            "type": "object",
                            "properties": {
                                "name": name_schema,  # override vnf name
                                "datacenter": name_schema,
                                # "metadata": {"type": "object"},
                                # "user_data": {"type": "string"}
                                # "cloud-config": cloud_config_schema,  # particular for a vnf
                                "vdus": {
                                    "type": "object",
                                    "patternProperties": {
                                        ".": {
                                            "type": "object",
                                            "properties": {
                                                "name": name_schema,  # overrides vdu name schema
                                                "mgmt_keys": {"type": "array", "items": {"type": "string"}},
                                                "vduImage": name_schema,
                                                "devices": {
                                                    "type": "object",
                                                    "patternProperties": {
                                                        ".": {
                                                            "vim_id": name_schema,
                                                        }
                                                    }
                                                },
                                                "interfaces": {
                                                    "type": "object",
                                                    "patternProperties": {
                                                        ".": {
                                                            "ip_address": ip_schema,
                                                            "mac_address": mac_schema,
                                                            "floating-ip": boolean_schema,
                                                        }
                                                    }
                                                }
                                            }
                                        }
                                    }
                                },
                                "networks": {
                                    "type": "object",
                                    "patternProperties": {
                                        ".": {
                                            "type": "object",
                                            "properties": {
                                                "vim-network-name": name_schema,
                                                "vim-network-id": name_schema,
                                                "ip-profile": ip_profile_schema,
                                                "name": name_schema,
                                            }
                                        }
                                    }
                                },
                            }
                        }
                    },
                },
                "networks": {  # mapping from scenario to datacenter
                    "type": "object",
                    "patternProperties": {
                        ".": {
                            "type": "object",
                            "properties": {
                                "interfaces": {
                                    "type": "array",
                                    "minLength": 1,
                                    "items": {
                                        "type": "object",
                                        "properties": {
                                            "ip_address": ip_schema,
                                            "datacenter": name_schema,
                                            "vim-network-name": name_schema,
                                            "vim-network-id": name_schema
                                        },
                                        "patternProperties": {
                                            ".": {"type": "string"}
                                        }
                                    }
                                },
                                "wim_account": {"oneOf": [boolean_schema, id_schema, null_schema]},
                                "ip-profile": ip_profile_schema,
                                "use-network": {
                                    "type": "object",
                                    "properties": {
                                        "instance_scenario_id": id_schema,
                                        # "member_vnf_index": name_schema,  # if not null, network inside VNF
                                        "osm_id": name_schema,  # sce_network osm_id or name
                                    },
                                    "additionalProperties": False,
                                    "required": ["instance_scenario_id", "osm_id"]
                                },
                                # if the network connects VNFs deployed at different sites, you must
                                # specify one entry per site that this network connects to
                                "sites": {
                                    "type": "array",
                                    "minLength": 1,
                                    "items": {
                                        "type": "object",
                                        "properties": {
                                            # By default for a scenario 'external' network openmano looks for an
                                            # existing VIM network to map this external scenario network; for
                                            # other networks openmano creates them at the VIM.
                                            # Use netmap-create to force creating an external scenario network.
                                            "netmap-create": {"oneOf": [name_schema, null_schema]},  # datacenter network to use. Null if must be created as an internal net
                                            # netmap-use: indicates an existing VIM network that must be used for
                                            # this scenario network; can be the VIM network name (if not
                                            # ambiguous) or the VIM net UUID. If both 'netmap-create' and
                                            # 'netmap-use' are supplied, netmap-use precedes, but if it fails
                                            # openmano follows the netmap-create. In other words: "try to map to
                                            # the VIM network (netmap-use) if it exists, else create it
                                            # (netmap-create)".
                                            "netmap-use": name_schema,
                                            "vim-network-name": name_schema,  # override network name
                                            "vim-network-id": name_schema,
                                            # "ip-profile": ip_profile_schema,
                                            "datacenter": name_schema,
                                        }
                                    }
                                },
                            }
                        }
                    },
                },
            },
            "additionalProperties": False,
            "required": ["name"]
        },
    },
    "required": ["instance"],
    "additionalProperties": False
}
+
# Schema for actions performed over an already deployed instance.
instance_scenario_action_schema = {
    "title": "instance scenario action information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        # plain actions carry no payload
        "start": null_schema,
        "pause": null_schema,
        "resume": null_schema,
        "shutoff": null_schema,
        "shutdown": null_schema,
        "forceOff": null_schema,
        "rebuild": null_schema,
        "reboot": {"type": ["object", "null"]},
        "add_public_key": {"type": "string"},
        "user": nameshort_schema,
        "console": {"type": ["string", "null"], "enum": ["novnc", "xvpvnc", "rdp-html5", "spice-html5", None]},
        "vdu-scaling": {
            "type": "array",
            "items": {
                "type": "object",
                "properties": {
                    "vdu-id": id_schema,
                    "osm_vdu_id": name_schema,
                    "member-vnf-index": name_schema,
                    "count": integer1_schema,
                    "type": {"enum": ["create", "delete"]},
                },
                "additionalProperties": False,
                "minProperties": 1,
                "required": ["type"],
            },
        },
        # optionally restrict the action to these vnfs / vms
        "vnfs": {"type": "array", "items": {"type": "string"}},
        "vms": {"type": "array", "items": {"type": "string"}},
    },
    "minProperties": 1,
    # "maxProperties": 1,
    "additionalProperties": False,
}
+
# Properties shared by the sdn controller create and edit schemas.
sdn_controller_properties = {
    "name": name_schema,
    # datapath identifier: 8 colon-separated hex octets
    "dpid": {"type": "string", "pattern": "^[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){7}$"},
    "ip": ip_schema,
    "port": port_schema,
    "type": {"type": "string", "enum": ["opendaylight", "floodlight", "onos"]},
    "version": {"type": "string", "minLength": 1, "maxLength": 12},
    "user": nameshort_schema,
    "password": passwd_schema,
}
# Schema used when registering a new sdn controller.
sdn_controller_schema = {
    "title": "sdn controller information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "sdn_controller": {
            "type": "object",
            "properties": sdn_controller_properties,
            "required": ["name", "port", "ip", "dpid", "type"],
            "additionalProperties": False,
        },
    },
    "required": ["sdn_controller"],
    "additionalProperties": False,
}
+
# Schema used when updating an existing sdn controller: same properties as
# creation but nothing is mandatory.
sdn_controller_edit_schema = {
    "title": "sdn controller update information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "sdn_controller": {
            "type": "object",
            "properties": sdn_controller_properties,
            "additionalProperties": False,
        },
    },
    "required": ["sdn_controller"],
    "additionalProperties": False,
}
+
# Schema to validate the compute-node to switch-port mapping of an sdn controller.
# BUGFIX: the "pci" property used "OneOf"; json-schema keywords are case
# sensitive ("oneOf"), so draft-04 validators silently ignored the alternatives
# and "pci" was never validated. Fixed to "oneOf".
sdn_port_mapping_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "title": "sdn port mapping information schema",
    "type": "object",
    "properties": {
        "sdn_port_mapping": {
            "type": "array",
            "items": {
                "type": "object",
                "properties": {
                    "compute_node": nameshort_schema,
                    "ports": {
                        "type": "array",
                        "items": {
                            "type": "object",
                            "properties": {
                                "pci": {"oneOf": [null_schema, pci_extended_schema]},  # pci_schema,
                                "switch_port": nameshort_schema,
                                "switch_mac": mac_schema
                            },
                            "required": ["pci"]
                        }
                    }
                },
                "required": ["compute_node", "ports"]
            }
        }
    },
    "required": ["sdn_port_mapping"]
}
+
# Schema for attaching an external port (port name plus optional vlan/mac).
sdn_external_port_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "title": "External port information",  # BUGFIX: fixed "ingformation" typo
    "type": "object",
    "properties": {
        "port": {"type": "string", "minLength": 1, "maxLength": 60},
        "vlan": vlan_schema,
        "mac": mac_schema
    },
    "required": ["port"]
}
diff --git a/RO/osm_ro/openmanoclient.py b/RO/osm_ro/openmanoclient.py
new file mode 100644 (file)
index 0000000..fc8bde1
--- /dev/null
@@ -0,0 +1,1220 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+"""
+openmano python client used to interact with openmano-server
+"""
+
+import requests
+import json
+import yaml
+import logging
+import sys
+
# Module metadata
__author__ = "Alfonso Tierno, Pablo Montes"
__date__ = "$09-Mar-2016 09:09:48$"
__version__ = "0.1.0-r470"
version_date = "Oct 2017"
+
+from urllib.parse import quote
+
class OpenmanoException(Exception):
    """Base class of every exception raised by the openmano client."""


class OpenmanoBadParamsException(OpenmanoException):
    """Raised when input parameters are missing or invalid."""


class OpenmanoResponseException(OpenmanoException):
    """Raised on an unexpected response from the openmano server."""


class OpenmanoNotFoundException(OpenmanoException):
    """Raised when the requested item does not exist at the server."""
+
+# class vnf():
+#     def __init__(self, message):
+#         print "Error: %s" %message
+#         print
+#         self.print_usage()
+#         #self.print_help()
+#         print
+#         print "Type 'openmano -h' for help"
+
class openmanoclient():
    """Client to interact with an openmano-server through its north-bound REST API.

    Connection parameters (endpoint_url, username/password, tenant and
    datacenter, either by id or by name) are taken as keyword arguments by the
    constructor and can also be read and written with dictionary syntax.
    """
    # every request and response body is exchanged as YAML text
    headers_req = {'Accept': 'application/yaml', 'content-type': 'application/yaml'}
+    
+    def __init__(self, **kwargs):
+        self.username = kwargs.get("username")
+        self.password = kwargs.get("password")
+        self.endpoint_url = kwargs.get("endpoint_url")
+        self.tenant_id = kwargs.get("tenant_id")
+        self.tenant_name = kwargs.get("tenant_name")
+        self.tenant = None
+        self.datacenter_id = kwargs.get("datacenter_id")
+        self.datacenter_name = kwargs.get("datacenter_name")
+        self.datacenter = None
+        self.logger = logging.getLogger(kwargs.get('logger','manoclient'))
+        if kwargs.get("debug"):
+            self.logger.setLevel(logging.DEBUG)
+        
+    def __getitem__(self, index):
+        if index=='tenant_name':
+            return self.tenant_name
+        elif index=='tenant_id':
+            return self.tenant_id
+        elif index=='datacenter_name':
+            return self.datacenter_name
+        elif index=='datacenter_id':
+            return self.datacenter_id
+        elif index=='username':
+            return self.username
+        elif index=='password':
+            return self.password
+        elif index=='endpoint_url':
+            return self.endpoint_url
+        else:
+            raise KeyError("Invalid key '{}'".format(index))
+        
+    def __setitem__(self,index, value):
+        if index=='tenant_name':
+            self.tenant_name = value
+        elif index=='tenant_id':
+            self.tenant_id = value
+        elif index=='datacenter_name':
+            self.datacenter_name = value
+        elif index=='datacenter_id':
+            self.datacenter_id = value
+        elif index=='username':
+            self.username = value
+        elif index=='password':
+            self.password = value
+        elif index=='endpoint_url':
+            self.endpoint_url = value
+        else:
+            raise KeyError("Invalid key '{}'".format(index))
+        self.tenant = None # force to reload tenant with different credentials
+        self.datacenter = None # force to reload datacenter with different credentials
+    
+    def _parse(self, descriptor, descriptor_format, response=False):
+        #try yaml
+        if descriptor_format and descriptor_format != "json" and descriptor_format != "yaml":
+            raise  OpenmanoBadParamsException("'descriptor_format' must be a 'json' or 'yaml' text")
+        if descriptor_format != "json":
+            try:
+                return yaml.load(descriptor, Loader=yaml.SafeLoader)
+            except yaml.YAMLError as exc:
+                error_pos = ""
+                if hasattr(exc, 'problem_mark'):
+                    mark = exc.problem_mark
+                    error_pos = " at line:{} column:{}s".format(mark.line+1, mark.column+1)
+                error_text = "yaml format error" + error_pos
+        elif descriptor_format != "yaml":
+            try:
+                return json.loads(descriptor) 
+            except Exception as e:
+                if response:
+                    error_text = "json format error" + str(e)
+
+        if response:
+            raise OpenmanoResponseException(error_text)
+        raise  OpenmanoBadParamsException(error_text)
+    
+    def _parse_yaml(self, descriptor, response=False):
+        try:
+            return yaml.load(descriptor, Loader=yaml.SafeLoader)
+        except yaml.YAMLError as exc:
+            error_pos = ""
+            if hasattr(exc, 'problem_mark'):
+                mark = exc.problem_mark
+                error_pos = " at line:{} column:{}s".format(mark.line+1, mark.column+1)
+            error_text = "yaml format error" + error_pos
+            if response:
+                raise OpenmanoResponseException(error_text)
+            raise  OpenmanoBadParamsException(error_text)
+
+    
+    def _get_item_uuid(self, item, item_id=None, item_name=None, all_tenants=False):
+        if all_tenants == None:
+            tenant_text = ""
+        elif all_tenants == False:
+            tenant_text = "/" + self.tenant
+        else:
+            tenant_text = "/any"
+        URLrequest = "{}{}/{}".format(self.endpoint_url, tenant_text, item)
+        self.logger.debug("GET %s", URLrequest )
+        mano_response = requests.get(URLrequest, headers=self.headers_req)
+        self.logger.debug("openmano response: %s", mano_response.text )
+        content = self._parse_yaml(mano_response.text, response=True)
+        #print content
+        found = 0
+        if not item_id and not item_name:
+            raise OpenmanoResponseException("Missing either {0}_name or {0}_id".format(item[:-1]))
+        for i in content[item]:
+            if item_id and i["uuid"] == item_id:
+                return item_id
+            elif item_name and i["name"] == item_name:
+                uuid = i["uuid"]
+                found += 1
+            
+        if found == 0:
+            if item_id:
+                raise OpenmanoNotFoundException("No {} found with id '{}'".format(item[:-1], item_id))
+            else:
+                #print(item, item_name)
+                raise OpenmanoNotFoundException("No {} found with name '{}'".format(item[:-1], item_name) )
+        elif found > 1:
+            raise OpenmanoNotFoundException("{} {} found with name '{}'. uuid must be used".format(found, item, item_name))
+        return uuid
+
+    def _get_item(self, item, uuid=None, name=None, all_tenants=False):
+        if all_tenants:
+            tenant_text = "/any"
+        elif all_tenants==None:
+            tenant_text = ""
+        else:
+            tenant_text = "/"+self._get_tenant()
+        if not uuid:
+            #check that exist
+            uuid = self._get_item_uuid(item, uuid, name, all_tenants)
+        
+        URLrequest = "{}{}/{}/{}".format(self.endpoint_url, tenant_text, item, uuid)
+        self.logger.debug("GET %s", URLrequest )
+        mano_response = requests.get(URLrequest, headers=self.headers_req)
+        self.logger.debug("openmano response: %s", mano_response.text )
+    
+        content = self._parse_yaml(mano_response.text, response=True)
+        if mano_response.status_code==200:
+            return content
+        else:
+            raise OpenmanoResponseException(str(content))        
+
+    def _get_tenant(self):
+        if not self.tenant:
+            self.tenant = self._get_item_uuid("tenants", self.tenant_id, self.tenant_name, None)
+        return self.tenant
+    
+    def _get_datacenter(self):
+        if not self.tenant:
+            self._get_tenant()
+        if not self.datacenter:
+            self.datacenter = self._get_item_uuid("datacenters", self.datacenter_id, self.datacenter_name, False)
+        return self.datacenter
+
+    def _create_item(self, item, descriptor, all_tenants=False, api_version=None):
+        if all_tenants:
+            tenant_text = "/any"
+        elif all_tenants is None:
+            tenant_text = ""
+        else:
+            tenant_text = "/"+self._get_tenant()
+        payload_req = yaml.safe_dump(descriptor)
+
+        api_version_text = ""
+        if api_version:
+            api_version_text = "/v3"
+            
+        #print payload_req
+            
+        URLrequest = "{}{apiver}{tenant}/{item}".format(self.endpoint_url, apiver=api_version_text, tenant=tenant_text,
+                                                        item=item)
+        self.logger.debug("openmano POST %s %s", URLrequest, payload_req)
+        mano_response = requests.post(URLrequest, headers=self.headers_req, data=payload_req)
+        self.logger.debug("openmano response: %s", mano_response.text)
+    
+        content = self._parse_yaml(mano_response.text, response=True)
+        if mano_response.status_code == 200:
+            return content
+        else:
+            raise OpenmanoResponseException(str(content))        
+
+    def _del_item(self, item, uuid=None, name=None, all_tenants=False):
+        if all_tenants:
+            tenant_text = "/any"
+        elif all_tenants==None:
+            tenant_text = ""
+        else:
+            tenant_text = "/"+self._get_tenant()
+        if not uuid:
+            #check that exist
+            uuid = self._get_item_uuid(item, uuid, name, all_tenants)
+        
+        URLrequest = "{}{}/{}/{}".format(self.endpoint_url, tenant_text, item, uuid)
+        self.logger.debug("DELETE %s", URLrequest )
+        mano_response = requests.delete(URLrequest, headers = self.headers_req)
+        self.logger.debug("openmano response: %s", mano_response.text )
+    
+        content = self._parse_yaml(mano_response.text, response=True)
+        if mano_response.status_code==200:
+            return content
+        else:
+            raise OpenmanoResponseException(str(content))        
+    
+    def _list_item(self, item, all_tenants=False, filter_dict=None):
+        if all_tenants:
+            tenant_text = "/any"
+        elif all_tenants==None:
+            tenant_text = ""
+        else:
+            tenant_text = "/"+self._get_tenant()
+        
+        URLrequest = "{}{}/{}".format(self.endpoint_url, tenant_text, item)
+        separator="?"
+        if filter_dict:
+            for k in filter_dict:
+                URLrequest += separator + quote(str(k)) + "=" + quote(str(filter_dict[k])) 
+                separator = "&"
+        self.logger.debug("openmano GET %s", URLrequest)
+        mano_response = requests.get(URLrequest, headers=self.headers_req)
+        self.logger.debug("openmano response: %s", mano_response.text )
+    
+        content = self._parse_yaml(mano_response.text, response=True)
+        if mano_response.status_code==200:
+            return content
+        else:
+            raise OpenmanoResponseException(str(content))        
+
+    def _edit_item(self, item, descriptor, uuid=None, name=None, all_tenants=False):
+        if all_tenants:
+            tenant_text = "/any"
+        elif all_tenants==None:
+            tenant_text = ""
+        else:
+            tenant_text = "/"+self._get_tenant()
+
+        if not uuid:
+            #check that exist
+            uuid = self._get_item_uuid("tenants", uuid, name, all_tenants)
+        
+        payload_req = yaml.safe_dump(descriptor)
+            
+        #print payload_req
+            
+        URLrequest = "{}{}/{}/{}".format(self.endpoint_url, tenant_text, item, uuid)
+        self.logger.debug("openmano PUT %s %s", URLrequest, payload_req)
+        mano_response = requests.put(URLrequest, headers = self.headers_req, data=payload_req)
+        self.logger.debug("openmano response: %s", mano_response.text )
+    
+        content = self._parse_yaml(mano_response.text, response=True)
+        if mano_response.status_code==200:
+            return content
+        else:
+            raise OpenmanoResponseException(str(content))        
+
+    #TENANTS
+    def list_tenants(self, **kwargs):
+        '''Obtain a list of tenants
+        Params: can be filtered by 'uuid','name','description'
+        Return: Raises an exception on error
+                Obtain a dictionary with format {'tenants':[{tenant1_info},{tenant2_info},...]}}
+        '''
+        return self._list_item("tenants", all_tenants=None, filter_dict=kwargs)
+
+    def get_tenant(self, uuid=None, name=None):
+        '''Obtain the information of a tenant
+        Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+        Return: Raises an exception on error, not found, found several
+                Obtain a dictionary with format {'tenant':{tenant_info}}
+        '''
+        return self._get_item("tenants", uuid, name, all_tenants=None)
+
+    def delete_tenant(self, uuid=None, name=None):
+        '''Delete a tenant
+        Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+        Return: Raises an exception on error, not found, found several
+                Obtain a dictionary with format {'result': text indicating deleted}
+        '''
+        return self._del_item("tenants", uuid, name, all_tenants=None)
+
+    def create_tenant(self, descriptor=None, descriptor_format=None, name=None, description=None):
+        '''Creates a tenant
+        Params: must supply a descriptor or/and just a name
+            descriptor: with format {'tenant':{new_tenant_info}}
+                newtenant_info must contain 'name', and optionally 'description'
+                must be a dictionary or a json/yaml text.
+            name: the tenant name. Overwrite descriptor name if any
+            description: tenant descriptor.. Overwrite descriptor description if any
+        Return: Raises an exception on error
+                Obtain a dictionary with format {'tenant':{new_tenant_info}}
+        '''
+        if isinstance(descriptor, str):
+            descriptor = self._parse(descriptor, descriptor_format)
+        elif descriptor:
+            pass
+        elif name:
+            descriptor={"tenant": {"name": name}}
+        else:
+            raise OpenmanoBadParamsException("Missing descriptor")
+
+        if 'tenant' not in descriptor or len(descriptor)!=1:
+            raise OpenmanoBadParamsException("Descriptor must contain only one 'tenant' field")
+        if name:
+            descriptor['tenant']['name'] = name
+        if description:
+            descriptor['tenant']['description'] = description
+
+        return self._create_item("tenants", descriptor, all_tenants=None)
+
+    def edit_tenant(self, uuid=None, name=None, descriptor=None, descriptor_format=None, new_name=None, new_description=None):
+        '''Edit the parameters of a tenant
+        Params: must supply a descriptor or/and a new_name or new_description
+            uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+            descriptor: with format {'tenant':{params to change info}}
+                must be a dictionary or a json/yaml text.
+            name: the tenant name. Overwrite descriptor name if any
+            description: tenant descriptor.. Overwrite descriptor description if any
+        Return: Raises an exception on error, not found or found several
+                Obtain a dictionary with format {'tenant':{newtenant_info}}
+        '''
+
+        if isinstance(descriptor, str):
+            descriptor = self.parse(descriptor, descriptor_format)
+        elif descriptor:
+            pass
+        elif new_name or new_description:
+            descriptor={"tenant": {}}
+        else:
+            raise OpenmanoBadParamsException("Missing descriptor")
+
+        if 'tenant' not in descriptor or len(descriptor)!=1:
+            raise OpenmanoBadParamsException("Descriptor must contain only one 'tenant' field")
+        if new_name:
+            descriptor['tenant']['name'] = new_name
+        if new_description:
+            descriptor['tenant']['description'] = new_description
+
+        return self._edit_item("tenants", descriptor, uuid, name, all_tenants=None)
+
+    #DATACENTERS
+
+    def list_datacenters(self, all_tenants=False, **kwargs):
+        '''Obtain a list of datacenters, that are the VIM information at openmano
+        Params: can be filtered by 'uuid','name','vim_url','type'
+        Return: Raises an exception on error
+                Obtain a dictionary with format {'datacenters':[{datacenter1_info},{datacenter2_info},...]}}
+        '''
+        return self._list_item("datacenters", all_tenants, filter_dict=kwargs)
+
+    def get_datacenter(self, uuid=None, name=None, all_tenants=False):
+        '''Obtain the information of a datacenter
+        Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+        Return: Raises an exception on error, not found, found several
+                Obtain a dictionary with format {'datacenter':{datacenter_info}}
+        '''
+        return self._get_item("datacenters", uuid, name, all_tenants)
+
+    def delete_datacenter(self, uuid=None, name=None):
+        '''Delete a datacenter
+        Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+        Return: Raises an exception on error, not found, found several, not free
+                Obtain a dictionary with format {'result': text indicating deleted}
+        '''
+        if not uuid:
+            # check that exist
+            uuid = self._get_item_uuid("datacenters", uuid, name, all_tenants=True)
+        return self._del_item("datacenters", uuid, name, all_tenants=None)
+
+    def create_datacenter(self, descriptor=None, descriptor_format=None, name=None, vim_url=None, **kwargs):
+#, type="openvim", public=False, description=None):
+        '''Creates a datacenter
+        Params: must supply a descriptor or/and just a name and vim_url
+            descriptor: with format {'datacenter':{new_datacenter_info}}
+                newdatacenter_info must contain 'name', 'vim_url', and optionally 'description'
+                must be a dictionary or a json/yaml text.
+            name: the datacenter name. Overwrite descriptor name if any
+            vim_url: the datacenter URL. Overwrite descriptor vim_url if any
+            vim_url_admin: the datacenter URL for administrative issues. Overwrite descriptor vim_url if any
+            vim_type: the datacenter type, can be openstack or openvim. Overwrite descriptor type if any
+            public: boolean, by default not public
+            description: datacenter description. Overwrite descriptor description if any
+            config: dictionary with extra configuration for the concrete datacenter
+        Return: Raises an exception on error
+                Obtain a dictionary with format {'datacenter':{new_datacenter_info}}
+        '''
+        if isinstance(descriptor, str):
+            descriptor = self.parse(descriptor, descriptor_format)
+        elif descriptor:
+            pass
+        elif name and vim_url:
+            descriptor={"datacenter": {"name": name, "vim_url": vim_url}}
+        else:
+            raise OpenmanoBadParamsException("Missing descriptor, or name and vim_url")
+        
+        if 'datacenter' not in descriptor or len(descriptor)!=1:
+            raise OpenmanoBadParamsException("Descriptor must contain only one 'datacenter' field")
+        if name:
+            descriptor['datacenter']['name'] = name
+        if vim_url:
+            descriptor['datacenter']['vim_url'] = vim_url
+        for param in kwargs:
+            descriptor['datacenter'][param] = kwargs[param]
+
+        return self._create_item("datacenters", descriptor, all_tenants=None)
+
+    def edit_datacenter(self, uuid=None, name=None, descriptor=None, descriptor_format=None, all_tenants=False, **kwargs):
+        '''Edit the parameters of a datacenter
+        Params: must supply a descriptor or/and a parameter to change
+            uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+            descriptor: with format {'datacenter':{params to change info}}
+                must be a dictionary or a json/yaml text.
+            parameters to change can be supplyied by the descriptor or as parameters:
+                new_name: the datacenter name
+                vim_url: the datacenter URL
+                vim_url_admin: the datacenter URL for administrative issues
+                vim_type: the datacenter type, can be openstack or openvim.
+                public: boolean, available to other tenants
+                description: datacenter description
+        Return: Raises an exception on error, not found or found several
+                Obtain a dictionary with format {'datacenter':{new_datacenter_info}}
+        '''
+
+        if isinstance(descriptor, str):
+            descriptor = self.parse(descriptor, descriptor_format)
+        elif descriptor:
+            pass
+        elif kwargs:
+            descriptor={"datacenter": {}}
+        else:
+            raise OpenmanoBadParamsException("Missing descriptor")
+
+        if 'datacenter' not in descriptor or len(descriptor)!=1:
+            raise OpenmanoBadParamsException("Descriptor must contain only one 'datacenter' field")
+        for param in kwargs:
+            if param=='new_name':
+                descriptor['datacenter']['name'] = kwargs[param]
+            else:
+                descriptor['datacenter'][param] = kwargs[param]
+        return self._edit_item("datacenters", descriptor, uuid, name, all_tenants=None)
+    
+    def attach_datacenter(self, uuid=None, name=None, descriptor=None, descriptor_format=None,  vim_user=None, vim_password=None, vim_tenant_name=None, vim_tenant_id=None):
+        #check that exist
+        uuid = self._get_item_uuid("datacenters", uuid, name, all_tenants=True)
+        tenant_text = "/"+self._get_tenant()
+
+        if isinstance(descriptor, str):
+            descriptor = self.parse(descriptor, descriptor_format)
+        elif descriptor:
+            pass
+        elif vim_user or vim_password or vim_tenant_name or vim_tenant_id:
+            descriptor={"datacenter": {}}
+        else:
+            raise OpenmanoBadParamsException("Missing descriptor or params")
+        
+        if vim_user or vim_password or vim_tenant_name or vim_tenant_id:
+            #print args.name
+            try:
+                if vim_user:
+                    descriptor['datacenter']['vim_user'] = vim_user
+                if vim_password:
+                    descriptor['datacenter']['vim_password'] = vim_password
+                if vim_tenant_name:
+                    descriptor['datacenter']['vim_tenant_name'] = vim_tenant_name
+                if vim_tenant_id:
+                    descriptor['datacenter']['vim_tenant'] = vim_tenant_id
+            except (KeyError, TypeError) as e:
+                if str(e)=='datacenter':           error_pos= "missing field 'datacenter'"
+                else:                       error_pos="wrong format"
+                raise OpenmanoBadParamsException("Wrong datacenter descriptor: " + error_pos)
+
+        payload_req = yaml.safe_dump(descriptor)
+        #print payload_req
+        URLrequest = "{}{}/datacenters/{}".format(self.endpoint_url, tenant_text, uuid)
+        self.logger.debug("openmano POST %s %s", URLrequest, payload_req)
+        mano_response = requests.post(URLrequest, headers = self.headers_req, data=payload_req)
+        self.logger.debug("openmano response: %s", mano_response.text )
+    
+        content = self._parse_yaml(mano_response.text, response=True)
+        if mano_response.status_code==200:
+            return content
+        else:
+            raise OpenmanoResponseException(str(content))        
+
+    def detach_datacenter(self, uuid=None, name=None):
+        if not uuid:
+            #check that exist
+            uuid = self._get_item_uuid("datacenters", uuid, name, all_tenants=False)
+        tenant_text = "/"+self._get_tenant()
+        URLrequest = "{}{}/datacenters/{}".format(self.endpoint_url, tenant_text, uuid)
+        self.logger.debug("openmano DELETE %s", URLrequest)
+        mano_response = requests.delete(URLrequest, headers = self.headers_req)
+        self.logger.debug("openmano response: %s", mano_response.text )
+    
+        content = self._parse_yaml(mano_response.text, response=True)
+        if mano_response.status_code==200:
+            return content
+        else:
+            raise OpenmanoResponseException(str(content))
+
+    # WIMS
+
+    def list_wims(self, all_tenants=False, **kwargs):
+        '''Obtain a list of wims, that are the WIM information at openmano
+        Params: can be filtered by 'uuid','name','wim_url','type'
+        Return: Raises an exception on error
+                Obtain a dictionary with format {'wims':[{wim1_info},{wim2_info},...]}}
+        '''
+        return self._list_item("wims", all_tenants, filter_dict=kwargs)
+
+    def get_wim(self, uuid=None, name=None, all_tenants=False):
+        '''Obtain the information of a wim
+        Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+        Return: Raises an exception on error, not found, found several
+                Obtain a dictionary with format {'wim':{wim_info}}
+        '''
+        return self._get_item("wims", uuid, name, all_tenants)
+
+    def delete_wim(self, uuid=None, name=None):
+        '''Delete a wim
+        Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+        Return: Raises an exception on error, not found, found several, not free
+                Obtain a dictionary with format {'result': text indicating deleted}
+        '''
+        if not uuid:
+            # check that exist
+            uuid = self._get_item_uuid("wims", uuid, name, all_tenants=True)
+        return self._del_item("wims", uuid, name, all_tenants=None)
+
+    def create_wim(self, descriptor=None, descriptor_format=None, name=None, wim_url=None, **kwargs):
+        # , type="openvim", public=False, description=None):
+        '''Creates a wim
+        Params: must supply a descriptor or/and just a name and a wim_url
+            descriptor: with format {'wim':{new_wim_info}}
+                new_wim_info must contain 'name', 'wim_url', and optionally 'description'
+                must be a dictionary or a json/yaml text.
+            name: the wim name. Overwrite descriptor name if any
+            wim_url: the wim URL. Overwrite descriptor vim_url if any
+            wim_type: the WIM type, can be tapi, odl, onos. Overwrite descriptor type if any
+            public: boolean, by default not public
+            description: wim description. Overwrite descriptor description if any
+            config: dictionary with extra configuration for the concrete wim
+        Return: Raises an exception on error
+                Obtain a dictionary with format {'wim:{new_wim_info}}
+        '''
+        if isinstance(descriptor, str):
+            descriptor = self.parse(descriptor, descriptor_format)
+        elif descriptor:
+            pass
+        elif name and wim_url:
+            descriptor = {"wim": {"name": name, "wim_url": wim_url}}
+        else:
+            raise OpenmanoBadParamsException("Missing descriptor, or name and wim_url")
+
+        if 'wim' not in descriptor or len(descriptor) != 1:
+            raise OpenmanoBadParamsException("Descriptor must contain only one 'wim' field")
+        if name:
+            descriptor['wim']['name'] = name
+        if wim_url:
+            descriptor['wim']['wim_url'] = wim_url
+        for param in kwargs:
+            descriptor['wim'][param] = kwargs[param]
+
+        return self._create_item("wims", descriptor, all_tenants=None)
+
+    def edit_wim(self, uuid=None, name=None, descriptor=None, descriptor_format=None, all_tenants=False,
+                        **kwargs):
+        '''Edit the parameters of a wim
+        Params: must supply a descriptor or/and a parameter to change
+            uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+            descriptor: with format {'wim':{params to change info}}
+                must be a dictionary or a json/yaml text.
+            parameters to change can be supplied by the descriptor or as parameters:
+                new_name: the wim name
+                wim_url: the wim URL
+                wim_type: the wim type, can be tapi, onos, odl
+                public: boolean, available to other tenants
+                description: wim description
+        Return: Raises an exception on error, not found or found several
+                Obtain a dictionary with format {'wim':{new_wim_info}}
+        '''
+        if isinstance(descriptor, str):
+            descriptor = self.parse(descriptor, descriptor_format)
+        elif descriptor:
+            pass
+        elif kwargs:
+            descriptor = {"wim": {}}
+        else:
+            raise OpenmanoBadParamsException("Missing descriptor")
+
+        if 'wim' not in descriptor or len(descriptor) != 1:
+            raise OpenmanoBadParamsException("Descriptor must contain only one 'wim' field")
+        for param in kwargs:
+            if param == 'new_name':
+                descriptor['wim']['name'] = kwargs[param]
+            else:
+                descriptor['wim'][param] = kwargs[param]
+        return self._edit_item("wims", descriptor, uuid, name, all_tenants=None)
+
+    def attach_wim(self, uuid=None, name=None, descriptor=None, descriptor_format=None, wim_user=None,
+                          wim_password=None, wim_tenant_name=None, wim_tenant_id=None):
+        # check that exist
+        uuid = self._get_item_uuid("wims", uuid, name, all_tenants=True)
+        tenant_text = "/" + self._get_tenant()
+
+        if isinstance(descriptor, str):
+            descriptor = self.parse(descriptor, descriptor_format)
+        elif descriptor:
+            pass
+        elif wim_user or wim_password or wim_tenant_name or wim_tenant_id:
+            descriptor = {"wim": {}}
+        else:
+            raise OpenmanoBadParamsException("Missing descriptor or params")
+
+        if wim_user or wim_password or wim_tenant_name or wim_tenant_id:
+            # print args.name
+            try:
+                if wim_user:
+                    descriptor['wim']['wim_user'] = wim_user
+                if wim_password:
+                    descriptor['wim']['wim_password'] = wim_password
+                if wim_tenant_name:
+                    descriptor['wim']['wim_tenant_name'] = wim_tenant_name
+                if wim_tenant_id:
+                    descriptor['wim']['wim_tenant'] = wim_tenant_id
+            except (KeyError, TypeError) as e:
+                if str(e) == 'wim':
+                    error_pos = "missing field 'wim'"
+                else:
+                    error_pos = "wrong format"
+                raise OpenmanoBadParamsException("Wrong wim descriptor: " + error_pos)
+
+        payload_req = yaml.safe_dump(descriptor)
+        # print payload_req
+        URLrequest = "{}{}/wims/{}".format(self.endpoint_url, tenant_text, uuid)
+        self.logger.debug("openmano POST %s %s", URLrequest, payload_req)
+        mano_response = requests.post(URLrequest, headers=self.headers_req, data=payload_req)
+        self.logger.debug("openmano response: %s", mano_response.text)
+
+        content = self._parse_yaml(mano_response.text, response=True)
+        if mano_response.status_code == 200:
+            return content
+        else:
+            raise OpenmanoResponseException(str(content))
+
+    def detach_wim(self, uuid=None, name=None):
+        if not uuid:
+            # check that exist
+            uuid = self._get_item_uuid("wims", uuid, name, all_tenants=False)
+        tenant_text = "/" + self._get_tenant()
+        URLrequest = "{}{}/wims/{}".format(self.endpoint_url, tenant_text, uuid)
+        self.logger.debug("openmano DELETE %s", URLrequest)
+        mano_response = requests.delete(URLrequest, headers=self.headers_req)
+        self.logger.debug("openmano response: %s", mano_response.text)
+
+        content = self._parse_yaml(mano_response.text, response=True)
+        if mano_response.status_code == 200:
+            return content
+        else:
+            raise OpenmanoResponseException(str(content))
+
+    #VNFS
+    def list_vnfs(self, all_tenants=False, **kwargs):
+        '''Obtain a list of vnfs
+        Params: can be filtered by 'uuid','name','description','public', "tenant_id"
+        Return: Raises an exception on error
+                Obtain a dictionary with format {'vnfs':[{vnf1_info},{vnf2_info},...]}}
+        '''
+        return self._list_item("vnfs", all_tenants, kwargs)
+
+    def get_vnf(self, uuid=None, name=None, all_tenants=False):
+        '''Obtain the information of a vnf
+        Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+        Return: Raises an exception on error, not found, found several
+                Obtain a dictionary with format {'vnf':{vnf_info}}
+        '''
+        return self._get_item("vnfs", uuid, name, all_tenants)
+
+    def delete_vnf(self, uuid=None, name=None, all_tenants=False):
+        '''Delete a vnf
+        Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+        Return: Raises an exception on error, not found, found several, not free
+                Obtain a dictionary with format {'result': text indicating deleted}
+        '''
+        return self._del_item("vnfs", uuid, name, all_tenants)
+
+    def create_vnf(self, descriptor=None, descriptor_format=None, **kwargs):
+        '''Creates a vnf
+        Params: must supply a descriptor
+            descriptor: with format {'vnf':{new_vnf_info}}
+                must be a dictionary or a json/yaml text.
+                must be a dictionary or a json/yaml text.
+            Other parameters can be:
+                #TODO, revise
+                name: the vnf name. Overwrite descriptor name if any
+                image_path: Can be a string or a string list. Overwrite the image_path at descriptor
+                description: vnf descriptor.. Overwrite descriptor description if any
+                public: boolean, available to other tenants
+                class: user text for vnf classification
+                tenant_id: Propietary tenant
+                ...
+        Return: Raises an exception on error
+                Obtain a dictionary with format {'vnf':{new_vnf_info}}
+        '''
+        if isinstance(descriptor, str):
+            descriptor = self.parse(descriptor, descriptor_format)
+        elif descriptor:
+            pass
+        else:
+            raise OpenmanoBadParamsException("Missing descriptor")
+
+        try:
+            if "vnfd:vnfd-catalog" in descriptor or "vnfd-catalog" in descriptor:
+                api_version = "v3"
+                token = "vnfd"
+                vnfd_catalog = descriptor.get("vnfd:vnfd-catalog")
+                if not vnfd_catalog:
+                    vnfd_catalog = descriptor.get("vnfd-catalog")
+                vnfds = vnfd_catalog.get("vnfd:vnfd")
+                if not vnfds:
+                    vnfds = vnfd_catalog.get("vnfd")
+                vnfd = vnfds[0]
+                vdu_list = vnfd["vdu"]
+            elif "vnf" in descriptor:  # old API
+                api_version = None
+                token = "vnfs"
+                vnfd = descriptor['vnf']
+                vdu_list = vnfd["VNFC"]
+            else:
+                raise OpenmanoBadParamsException("Invalid VNF Descriptor must contain only one 'vnf' field or vnd-catalog")
+        except (KeyError, TypeError) as e:
+            raise OpenmanoBadParamsException("Invalid VNF Descriptor. Missing field {}".format(e))
+
+        if kwargs:
+            try:
+                if kwargs.get('name'):
+                    vnfd['name'] = kwargs['name']
+                if kwargs.get('description'):
+                    vnfd['description'] = kwargs['description']
+                if kwargs.get('image_path'):
+                    error_param = 'image_path'
+                    image_list = kwargs['image_path'].split(",")
+                    image_item = image_list.pop(0)
+                    # print "image-path", image_path_
+                    for vdu in vdu_list:
+                        if api_version == "v3":
+                            if vdu.get("image"):
+                                if image_item:
+                                    vdu['image'] = image_item
+                                    if "image-checksum" in vdu:
+                                        del vdu["image-checksum"]
+                                if image_list:
+                                    image_item = image_list.pop(0)
+                            for vol in vdu.get("volumes", ()):  # image name in volumes
+                                if image_item:
+                                    vol["image"] = image_item
+                                    if "image-checksum" in vol:
+                                        del vol["image-checksum"]
+                                if image_list:
+                                    image_item = image_list.pop(0)
+                        else:
+                            if image_item:
+                                vdu['VNFC image'] = image_item
+                                if "image name" in vdu:
+                                    del vdu["image name"]
+                                if "image checksum" in vdu:
+                                    del vdu["image checksum"]
+                            if image_list:
+                                image_item = image_list.pop(0)
+                            for vol in vdu.get('devices', ()):
+                                if vol['type'] != 'disk':
+                                    continue
+                                if image_item:
+                                    vol['image'] = image_item
+                                    if "image name" in vol:
+                                        del vol["image name"]
+                                    if "image checksum" in vol:
+                                        del vol["image checksum"]
+                                if image_list:
+                                    image_item = image_list.pop(0)
+                if kwargs.get('image_name'):  # image name precedes if both are supplied
+                    error_param = 'image_name'
+                    image_list = kwargs['image_name'].split(",")
+                    image_item = image_list.pop(0)
+                    for vdu in vdu_list:
+                        if api_version == "v3":
+                            if vdu.get("image"):
+                                if image_item:
+                                    vdu['image'] = image_item
+                                    if "image-checksum" in vdu:
+                                        del vdu["image-checksum"]
+                                if image_list:
+                                    image_item = image_list.pop(0)
+                            for vol in vdu.get("volumes", ()):  # image name in volumes
+                                if image_item:
+                                    vol["image"] = image_item
+                                    if "image-checksum" in vol:
+                                        del vol["image-checksum"]
+                                if image_list:
+                                    image_item = image_list.pop(0)
+                        else:
+                            if image_item:
+                                vdu['image name'] = image_item
+                                if "VNFC image" in vdu:
+                                    del vdu["VNFC image"]
+                            if image_list:
+                                image_item = image_list.pop(0)
+                            for vol in vdu.get('devices', ()):
+                                if vol['type'] != 'disk':
+                                    continue
+                                if image_item:
+                                    vol['image name'] = image_item
+                                    if "image" in vol:
+                                        del vol["image"]
+                                    if "image checksum" in vol:
+                                        del vol["image checksum"]
+                                if image_list:
+                                    image_item = image_list.pop(0)
+
+                if kwargs.get('image_checksum'):
+                    error_param = 'image_checksum'
+                    image_list = kwargs['image_checksum'].split(",")
+                    image_item = image_list.pop(0)
+                    for vdu in vdu_list:
+                        if api_version == "v3":
+                            if vdu.get("image"):
+                                if image_item:
+                                    vdu['image-checksum'] = image_item
+                                if image_list:
+                                    image_item = image_list.pop(0)
+                            for vol in vdu.get("volumes", ()):  # image name in volumes
+                                if image_item:
+                                    vol["mage-checksum"] = image_item
+                                if image_list:
+                                    image_item = image_list.pop(0)
+                        else:
+                            if image_item:
+                                vdu['image checksum'] = image_item
+                                if "VNFC image" in vdu:
+                                    del vdu["VNFC image"]
+                            if image_list:
+                                image_item = image_list.pop(0)
+                            for vol in vdu.get('devices', ()):
+                                if vol['type'] != 'disk':
+                                    continue
+                                if image_item:
+                                    vol['image checksum'] = image_item
+                                    if "image" in vol:
+                                        del vol["image"]
+                                if image_list:
+                                    image_item = image_list.pop(0)
+            except IndexError:
+                raise OpenmanoBadParamsException("{} contains more items than {} at descriptor".format(
+                    error_param, "vnfd-catalog:vnfd:vdu" if api_version else "vnf:VNFC"))
+            except (KeyError, TypeError) as e:
+                raise OpenmanoBadParamsException("Invalid VNF Descriptor. Missing field {}".format(e))
+        return self._create_item(token, descriptor, api_version=api_version)
+
+#     def edit_vnf(self, uuid=None, name=None, descriptor=None, descriptor_format=None, all_tenants=False, **kwargs):
+#         '''Edit the parameters of a vnf
+#         Params: must supply a descriptor or/and a parameters to change
+#             uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+#             descriptor: with format {'vnf':{params to change info}}
+#             parameters to change can be supplyied by the descriptor or as parameters:
+#                 new_name: the vnf name
+#                 vim_url: the vnf URL
+#                 vim_url_admin: the vnf URL for administrative issues
+#                 vim_type: the vnf type, can be openstack or openvim.
+#                 public: boolean, available to other tenants
+#                 description: vnf description
+#         Return: Raises an exception on error, not found or found several
+#                 Obtain a dictionary with format {'vnf':{new_vnf_info}}
+#         '''
+# 
+#        if isinstance(descriptor, str):
+#            descriptor = self.parse(descriptor, descriptor_format)
+#        elif descriptor:
+#            pass
+#         elif kwargs:
+#             descriptor={"vnf": {}}
+#         else:
+#             raise OpenmanoBadParamsException("Missing descriptor")
+# 
+#         if 'vnf' not in descriptor or len(descriptor)>2:
+#             raise OpenmanoBadParamsException("Descriptor must contain only one 'vnf' field")
+#         for param in kwargs:
+#             if param=='new_name':
+#                 descriptor['vnf']['name'] = kwargs[param]
+#             else:
+#                 descriptor['vnf'][param] = kwargs[param]
+#         return self._edit_item("vnfs", descriptor, uuid, name, all_tenants=None)
+
+    #SCENARIOS
+    def list_scenarios(self, all_tenants=False, **kwargs):
+        '''Obtain a list of scenarios
+        Params: can be filtered by 'uuid','name','description','public', "tenant_id"
+        Return: Raises an exception on error
+                Obtain a dictionary with format {'scenarios':[{scenario1_info},{scenario2_info},...]}}
+        '''
+        return self._list_item("scenarios", all_tenants, kwargs)
+
+    def get_scenario(self, uuid=None, name=None, all_tenants=False):
+        '''Obtain the information of a scenario
+        Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+        Return: Raises an exception on error, not found, found several
+                Obtain a dictionary with format {'scenario':{scenario_info}}
+        '''
+        return self._get_item("scenarios", uuid, name, all_tenants)
+
+    def delete_scenario(self, uuid=None, name=None, all_tenants=False):
+        '''Delete a scenario
+        Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+        Return: Raises an exception on error, not found, found several, not free
+                Obtain a dictionary with format {'result': text indicating deleted}
+        '''
+        return self._del_item("scenarios", uuid, name, all_tenants)
+
+    def create_scenario(self, descriptor=None, descriptor_format=None, **kwargs):
+        """Creates a scenario
+        Params: must supply a descriptor
+            descriptor: with format {'scenario':{new_scenario_info}}
+                must be a dictionary or a json/yaml text.
+            Other parameters can be:
+                name: the scenario name. Overwrite descriptor name if any
+                description: scenario descriptor.. Overwrite descriptor description if any
+                public: boolean, available to other tenants
+                tenant_id. Propietary tenant
+        Return: Raises an exception on error
+                Obtain a dictionary with format {'scenario':{new_scenario_info}}
+        """
+        if isinstance(descriptor, str):
+            descriptor = self.parse(descriptor, descriptor_format)
+        elif descriptor:
+            pass
+        else:
+            raise OpenmanoBadParamsException("Missing descriptor")
+        
+        try:
+            if "nsd:nsd-catalog" in descriptor or "nsd-catalog" in descriptor:
+                api_version = "v3"
+                token = "nsd"
+                nsd_catalog = descriptor.get("nsd:nsd-catalog")
+                if not nsd_catalog:
+                    nsd_catalog = descriptor.get("nsd-catalog")
+                nsds = nsd_catalog.get("nsd:nsd")
+                if not nsds:
+                    nsds = nsd_catalog.get("nsd")
+                nsd = nsds[0]
+            elif "scenario" in descriptor:  # old API
+                api_version = None
+                token = "scenarios"
+                nsd = descriptor['scenario']
+            else:
+                raise OpenmanoBadParamsException("Invalid NS Descriptor must contain only one 'scenario' field or nsd-catalog")
+        except (KeyError, TypeError) as e:
+            raise OpenmanoBadParamsException("Invalid NS Descriptor. Missing field {}".format(e))
+
+        for param in kwargs:
+            nsd[param] = kwargs[param]
+        return self._create_item(token, descriptor, api_version=api_version)
+
+    def edit_scenario(self, uuid=None, name=None, descriptor=None, descriptor_format=None, all_tenants=False, **kwargs):
+        '''Edit the parameters of a scenario
+        Params: must supply a descriptor or/and a parameters to change
+            uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+            descriptor: with format {'scenario':{params to change info}}
+                must be a dictionary or a json/yaml text.
+            parameters to change can be supplyied by the descriptor or as parameters:
+                new_name: the scenario name
+                public: boolean, available to other tenants
+                description: scenario description
+                tenant_id. Propietary tenant
+        Return: Raises an exception on error, not found or found several
+                Obtain a dictionary with format {'scenario':{new_scenario_info}}
+        '''
+        if isinstance(descriptor, str):
+            descriptor = self.parse(descriptor, descriptor_format)
+        elif descriptor:
+            pass
+        elif kwargs:
+            descriptor={"scenario": {}}
+        else:
+            raise OpenmanoBadParamsException("Missing descriptor")
+        if 'scenario' not in descriptor or len(descriptor)>2:
+            raise OpenmanoBadParamsException("Descriptor must contain only one 'scenario' field")
+        for param in kwargs:
+            if param=='new_name':
+                descriptor['scenario']['name'] = kwargs[param]
+            else:
+                descriptor['scenario'][param] = kwargs[param]
+        return self._edit_item("scenarios", descriptor, uuid, name, all_tenants=None)
+
+
+    #INSTANCE-SCENARIOS
+    def list_instances(self, all_tenants=False, **kwargs):
+        '''Obtain a list of instances
+        Params: can be filtered by 'uuid','name','description','scenario_id', "tenant_id"
+        Return: Raises an exception on error
+                Obtain a dictionary with format {'instances':[{instance1_info},{instance2_info},...]}}
+        '''
+        return self._list_item("instances", all_tenants, kwargs)
+
+    def get_instance(self, uuid=None, name=None, all_tenants=False):
+        '''Obtain the information of a instance
+        Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+        Return: Raises an exception on error, not found, found several
+                Obtain a dictionary with format {'instance':{instance_info}}
+        '''
+        return self._get_item("instances", uuid, name, all_tenants)
+
+    def delete_instance(self, uuid=None, name=None, all_tenants=False):
+        '''Delete a instance
+        Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+        Return: Raises an exception on error, not found, found several, not free
+                Obtain a dictionary with format {'result': text indicating deleted}
+        '''
+        return self._del_item("instances", uuid, name, all_tenants)
+
+    def create_instance(self, descriptor=None, descriptor_format=None, name=None, **kwargs):
+        '''Creates a instance
+        Params: must supply a descriptor or/and a name and scenario
+            descriptor: with format {'instance':{new_instance_info}}
+                must be a dictionary or a json/yaml text.
+            name: the instance name. Overwrite descriptor name if any
+            Other parameters can be:
+                description: instance descriptor.. Overwrite descriptor description if any
+                datacenter_name, datacenter_id: datacenter  where to be deployed
+                scenario_name, scenario_id: Scenario this instance is based on
+        Return: Raises an exception on error
+                Obtain a dictionary with format {'instance':{new_instance_info}}
+        '''
+        if isinstance(descriptor, str):
+            descriptor = self.parse(descriptor, descriptor_format)
+        elif descriptor:
+            pass
+        elif name and ("scenario_name" in kwargs or "scenario_id" in kwargs):
+            descriptor = {"instance": {"name": name}}
+        else:
+            raise OpenmanoBadParamsException("Missing descriptor")
+        
+        if 'instance' not in descriptor or len(descriptor)>2:
+            raise OpenmanoBadParamsException("Descriptor must contain only one 'instance' field, and an optional version")
+        if name:
+            descriptor['instance']["name"] = name
+        if "scenario_name" in kwargs or "scenario_id" in kwargs:
+            descriptor['instance']["scenario"] = self._get_item_uuid("scenarios", kwargs.get("scenario_id"), kwargs.get("scenario_name"))
+        if "datacenter_name" in kwargs or "datacenter_id" in kwargs:
+            descriptor['instance']["datacenter"] = self._get_item_uuid("datacenters", kwargs.get("datacenter_id"), kwargs.get("datacenter_name"))
+        if "description" in kwargs:
+            descriptor['instance']["description"] = kwargs.get("description")
+        #for param in kwargs:
+        #    descriptor['instance'][param] = kwargs[param]
+        if "datacenter" not in descriptor['instance']:
+            descriptor['instance']["datacenter"] = self._get_datacenter()
+        return self._create_item("instances", descriptor)
+
+    #VIM ACTIONS
+    def vim_action(self, action, item, uuid=None, all_tenants=False, **kwargs):
+        '''Perform an action over a vim
+        Params: 
+            action: can be 'list', 'get'/'show', 'delete' or 'create'
+            item: can be 'tenants' or 'networks'
+            uuid: uuid of the tenant/net to show or to delete. Ignore otherwise
+            other parameters:
+                datacenter_name, datacenter_id: datacenters to act on, if missing uses classes store datacenter 
+                descriptor, descriptor_format: descriptor needed on creation, can be a dict or a yaml/json str 
+                    must be a dictionary or a json/yaml text.
+                name: for created tenant/net Overwrite descriptor name if any
+                description: tenant descriptor. Overwrite descriptor description if any
+                
+        Return: Raises an exception on error
+                Obtain a dictionary with format {'tenant':{new_tenant_info}}
+        '''
+        if item not in ("tenants", "networks", "images"):
+            raise OpenmanoBadParamsException("Unknown value for item '{}', must be 'tenants', 'nets' or "
+                                             "images".format(str(item)))
+
+        image_actions = ['list','get','show','delete']
+        if item == "images" and action not in image_actions:
+            raise OpenmanoBadParamsException("Only available actions for item '{}' are {}\n"
+                                             "Requested action was '{}'".format(item, ', '.join(image_actions), action))
+        if all_tenants:
+            tenant_text = "/any"
+        else:
+            tenant_text = "/"+self._get_tenant()
+        
+        if "datacenter_id" in kwargs or "datacenter_name" in kwargs:
+            datacenter = self._get_item_uuid("datacenters", kwargs.get("datacenter_id"), kwargs.get("datacenter_name"), all_tenants=all_tenants)
+        else:
+            datacenter = self._get_datacenter()
+
+        if action=="list":
+            URLrequest = "{}{}/vim/{}/{}".format(self.endpoint_url, tenant_text, datacenter, item)
+            self.logger.debug("GET %s", URLrequest )
+            mano_response = requests.get(URLrequest, headers=self.headers_req)
+            self.logger.debug("openmano response: %s", mano_response.text )
+            content = self._parse_yaml(mano_response.text, response=True)            
+            if mano_response.status_code==200:
+                return content
+            else:
+                raise OpenmanoResponseException(str(content))        
+        elif action=="get" or action=="show":
+            URLrequest = "{}{}/vim/{}/{}/{}".format(self.endpoint_url, tenant_text, datacenter, item, uuid)
+            self.logger.debug("GET %s", URLrequest )
+            mano_response = requests.get(URLrequest, headers=self.headers_req)
+            self.logger.debug("openmano response: %s", mano_response.text )
+            content = self._parse_yaml(mano_response.text, response=True)            
+            if mano_response.status_code==200:
+                return content
+            else:
+                raise OpenmanoResponseException(str(content))        
+        elif action=="delete":
+            URLrequest = "{}{}/vim/{}/{}/{}".format(self.endpoint_url, tenant_text, datacenter, item, uuid)
+            self.logger.debug("DELETE %s", URLrequest )
+            mano_response = requests.delete(URLrequest, headers=self.headers_req)
+            self.logger.debug("openmano response: %s", mano_response.text )
+            content = self._parse_yaml(mano_response.text, response=True)            
+            if mano_response.status_code==200:
+                return content
+            else:
+                raise OpenmanoResponseException(str(content))        
+        elif action=="create":
+            if "descriptor" in kwargs:
+                if isinstance(kwargs["descriptor"], str):
+                    descriptor = self._parse(kwargs["descriptor"], kwargs.get("descriptor_format") )
+                else:
+                    descriptor = kwargs["descriptor"]
+            elif "name" in kwargs:
+                descriptor={item[:-1]: {"name": kwargs["name"]}}
+            else:
+                raise OpenmanoResponseException("Missing descriptor")
+        
+            if item[:-1] not in descriptor or len(descriptor)!=1:
+                raise OpenmanoBadParamsException("Descriptor must contain only one 'tenant' field")
+            if "name" in kwargs:
+                descriptor[ item[:-1] ]['name'] = kwargs["name"]
+            if "description" in kwargs:
+                descriptor[ item[:-1] ]['description'] = kwargs["description"]
+            payload_req = yaml.safe_dump(descriptor)
+            #print payload_req
+            URLrequest = "{}{}/vim/{}/{}".format(self.endpoint_url, tenant_text, datacenter, item)
+            self.logger.debug("openmano POST %s %s", URLrequest, payload_req)
+            mano_response = requests.post(URLrequest, headers = self.headers_req, data=payload_req)
+            self.logger.debug("openmano response: %s", mano_response.text )
+            content = self._parse_yaml(mano_response.text, response=True)
+            if mano_response.status_code==200:
+                return content
+            else:
+                raise OpenmanoResponseException(str(content))
+        else:
+            raise OpenmanoBadParamsException("Unknown value for action '{}".format(str(action))) 
+
diff --git a/RO/osm_ro/openmanod.cfg b/RO/osm_ro/openmanod.cfg
new file mode 100644 (file)
index 0000000..3565bbf
--- /dev/null
@@ -0,0 +1,88 @@
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+#HTTP Server parameters (MANO API). IP address and port where openmanod listens
+# IPtables/firewalld must allow this port
# for CentOS/RedHat firewalld is configured at '/etc/firewalld/services/openmanod.xml'
+# edit this file and reload firewalld with 'firewall-cmd --reload' if port is changed
+http_host:       0.0.0.0      # IP address, (by default, 0.0.0.0 means that it will listen in all interfaces)
+http_port:       9090         # General port (by default, 9090)
+#http_admin_port: 9095        # Admin port where openmano is listening (when missing, no administration server is launched)
+                              # Not used in current version!
+
+#Parameters for a VIM console access. Can be directly the VIM URL or a proxy to offer the openmano IP address
+http_console_proxy: False    #by default True. If False proxy is not implemented and VIM URL is offered. It is
+                              #assumed then, that client can access directly to the VIMs
#http_console_host: <ip>       #by default the same as 'http_host'. However, if the openmano server is behind a NAT/proxy
+                              #you should specify the public IP used to access the server. Also when 'http_host' is 
+                              #0.0.0.0 you should specify the concrete IP address (or name) the server is accessed 
+# Ports to be used. Comma separated list. Can contain a {"from":<port>, "to":<port>} entry
+#e.g. from 9000 to 9005: [{"from":9000, "to":9005}], or also [9000,9001,9002,9003,9004,9005]
+#e.g. from 9000 to 9100 apart from 9050,9053: [{"from":9000, "to":9049},9051,9052,{"from":9054, "to":9099}]
+http_console_ports: [{"from":9096, "to":9110}]
+
+#Database parameters
+db_host:   localhost          # by default localhost
+db_user:   mano               # DB user
+db_passwd: manopw             # DB password
+db_name:   mano_db            # Name of the MANO DB
+# Database ovim parameters
+db_ovim_host:   localhost          # by default localhost
+db_ovim_user:   mano               # DB user
+db_ovim_passwd: manopw             # DB password
+db_ovim_name:   mano_vim_db        # Name of the OVIM MANO DB
+
+
+#other MANO parameters
+#  Folder where the VNF descriptors will be stored
+#  The folder will be created in the execution folder if it does not exist
+#vnf_repository: "./vnfrepo"  # Use an absolute path to avoid misunderstandings
+
+#   Indicates if at VNF onboarding, flavors and images are loaded at all related VIMs,
+#   in order to speed up the later instantiation.
+auto_push_VNF_to_VIMs: False  # by default True
+
+#general logging parameters 
+   #choose among: DEBUG, INFO, WARNING, ERROR, CRITICAL
+log_level:         INFO  #general log levels for internal logging
#standard output is used unless 'log_file' is specified
+#log_file:          /var/log/openmano/openmano.log
+
+#individual logging settings
+log_level_db:      ERROR  #database log levels
+#log_file_db:       /opt/openmano/logs/openmano_db.log
+#log_level_vim:     DEBUG  #VIM connection log levels
+#log_file_vim:      /opt/openmano/logs/openmano_vimconn.log
+#log_level_wim:     DEBUG  #WIM connection log levels
+#log_file_wim:      /opt/openmano/logs/openmano_wimconn.log
+#log_level_nfvo:    DEBUG  #Main engine log levels
+#log_file_nfvo:     /opt/openmano/logs/openmano_nfvo.log
+#log_level_http:    DEBUG  #Main engine log levels
+#log_file_http:     /opt/openmano/logs/openmano_http.log
+#log_level_console: DEBUG  #proxy console log levels
+#log_file_console:  /opt/openmano/logs/openmano_console.log
+#log_level_ovim:    DEBUG  #ovim library log levels
+#log_file_ovim:     /opt/openmano/logs/openmano_ovim.log
+
+#Uncomment to send logs via IP to an external host
+#log_socket_host:   localhost
+log_socket_port:   9022
+log_socket_level:  DEBUG  #general log levels for socket logging      
diff --git a/RO/osm_ro/openmanod.py b/RO/osm_ro/openmanod.py
new file mode 100755 (executable)
index 0000000..cdf451a
--- /dev/null
@@ -0,0 +1,379 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+"""
+openmano server.
+Main program that implements a reference NFVO (Network Functions Virtualisation Orchestrator).
+It interfaces with an NFV VIM through its API and offers a northbound interface, based on REST (openmano API),
+where NFV services are offered including the creation and deletion of VNF templates, VNF instances,
+network service templates and network service instances.
+
+It loads the configuration file and launches the http_server thread that will listen requests using openmano API.
+"""
+
+import time
+import sys
+import getopt
+import yaml
+from os import environ, path as os_path
+from jsonschema import validate as js_v, exceptions as js_e
+import logging
+import logging.handlers as log_handlers
+import socket
+
+from yaml import MarkedYAMLError
+
+from osm_ro import httpserver, nfvo, nfvo_db
+from osm_ro.openmano_schemas import config_schema
+from osm_ro.db_base import db_base_Exception
+from osm_ro.wim.engine import WimEngine
+from osm_ro.wim.persistence import WimPersistence
+import osm_ro
+
+__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes"
+__date__ = "$26-aug-2014 11:09:29$"
+__version__ = "6.0.3.post5"
+version_date = "Oct 2019"
+database_version = 39      # expected database schema version
+
+global global_config
+global logger
+
+
class LoadConfigurationException(Exception):
    """Raised when openmanod.cfg cannot be read, parsed or validated."""
+
+
def load_configuration(configuration_file):
    """Read, parse and validate the openmanod YAML configuration file.

    Missing keys are filled with built-in defaults after schema validation.
    Returns the configuration dict; raises LoadConfigurationException on any
    read, YAML or schema-validation problem.
    """
    defaults = {
        'http_port': 9090,
        'http_host': 'localhost',
        'http_console_proxy': True,
        'http_console_host': None,
        'log_level': 'DEBUG',
        'log_socket_port': 9022,
        'auto_push_VNF_to_VIMs': True,
        'db_host': 'localhost',
        'db_ovim_host': 'localhost',
    }
    try:
        # read and parse the configuration file
        with open(configuration_file, 'r') as cfg_file:
            raw_config = cfg_file.read()
        config = yaml.load(raw_config, Loader=yaml.SafeLoader)
        # validate against the declared schema
        js_v(config, config_schema)
        # fill defaults for keys the file did not set
        for key, value in defaults.items():
            if key not in config:
                config[key] = value
        return config

    except yaml.YAMLError as e:
        where = ""
        if isinstance(e, MarkedYAMLError):
            mark = e.problem_mark
            where = " at line:{} column:{}".format(mark.line + 1, mark.column + 1)
        raise LoadConfigurationException("Bad YAML format at configuration file '{file}'{pos}: {message}".format(
            file=configuration_file, pos=where, message=e))
    except js_e.ValidationError as e:
        where = ""
        if e.path:
            where = " at '" + ":".join(map(str, e.path)) + "'"
        raise LoadConfigurationException("Invalid field at configuration file '{file}'{pos} {message}".format(
            file=configuration_file, pos=where, message=e))
    except Exception as e:
        raise LoadConfigurationException("Cannot load configuration file '{file}' {message}".format(
            file=configuration_file, message=e))
+
+
def console_port_iterator():
    """Yield every console port configured in global_config["http_console_ports"].

    Entries may be plain ints or {"from": a, "to": b} dicts describing an
    inclusive range, e.g. [9000, {"from": 9002, "to": 9005}].
    """
    # idiomatic rewrite of the original index-based while loop; behavior is
    # unchanged for the int/dict entries the configuration file produces
    for entry in global_config["http_console_ports"]:
        if isinstance(entry, int):
            yield entry
        else:  # dictionary with "from"/"to" inclusive bounds
            yield from range(entry["from"], entry["to"] + 1)
+
+
def usage():
    """Print the openmanod command-line help to stdout."""
    help_lines = (
        "Usage:  {} [options]".format(sys.argv[0]),
        "      -v|--version: prints current version",
        "      -c|--config [configuration_file]: loads the configuration file (default: openmanod.cfg)",
        "      -h|--help: shows this help",
        "      -p|--port [port_number]: changes port number and overrides the port number in the configuration file (default: 9090)",
        "      -P|--adminport [port_number]: changes admin port number and overrides the port number in the configuration file (default: 9095)",
        "      --log-socket-host HOST: send logs to this host",
        "      --log-socket-port PORT: send logs using this port (default: 9022)",
        "      --log-file FILE: send logs to this file",
        "      --create-tenant NAME: Try to creates this tenant name before starting, ignoring any errors as e.g. conflict",
    )
    print("\n".join(help_lines))
    return
+
+
def set_logging_file(log_file):
    """Route the 'openmano' logger to a rotating file.

    Adds a RotatingFileHandler (9 backups of ~100 MB) and removes the initial
    stream handler that basicConfig installed on the root logger.
    Raises LoadConfigurationException when the file cannot be opened.
    """
    try:
        handler = log_handlers.RotatingFileHandler(log_file, maxBytes=100e6, backupCount=9, delay=0)
        handler.setFormatter(log_formatter_simple)
        logger.addHandler(handler)
        # drop the stream handler installed by logging.basicConfig now that
        # output goes to the file
        logging.root.removeHandler(logging.root.handlers[0])
        print("logging on '{}'".format(log_file))
    except IOError as e:
        raise LoadConfigurationException(
            "Cannot open logging file '{}': {}. Check folder exist and permissions".format(log_file, e))
+
+
if __name__ == "__main__":
    # env2config contains environ variable names and the correspondence with configuration file openmanod.cfg keys.
    # If this environ is defined, this value is taken instead of the one at the configuration file
    env2config = {
        'RO_DB_HOST': 'db_host',
        'RO_DB_NAME': 'db_name',
        'RO_DB_USER': 'db_user',
        'RO_DB_PASSWORD': 'db_passwd',
        'RO_DB_OVIM_HOST': 'db_ovim_host',
        'RO_DB_OVIM_NAME': 'db_ovim_name',
        'RO_DB_OVIM_USER': 'db_ovim_user',
        'RO_DB_OVIM_PASSWORD': 'db_ovim_passwd',
        'RO_LOG_LEVEL': 'log_level',
        'RO_LOG_FILE': 'log_file',
    }
    # Configure logging step 1: everything to stdout until the configuration file is read
    hostname = socket.gethostname()
    log_formatter_str = '%(asctime)s.%(msecs)03d00Z[{host}@openmanod] %(filename)s:%(lineno)s severity:%(levelname)s logger:%(name)s log:%(message)s'
    log_formatter_complete = logging.Formatter(log_formatter_str.format(host=hostname), datefmt='%Y-%m-%dT%H:%M:%S')
    log_format_simple = "%(asctime)s %(levelname)s  %(name)s %(thread)d %(filename)s:%(lineno)s %(message)s"
    log_formatter_simple = logging.Formatter(log_format_simple, datefmt='%Y-%m-%dT%H:%M:%S')
    logging.basicConfig(format=log_format_simple, level=logging.DEBUG)
    logger = logging.getLogger('openmano')
    logger.setLevel(logging.DEBUG)
    socket_handler = None
    # Read parameters and configuration file
    httpthread = None
    try:
        # load parameters and configuration
        opts, args = getopt.getopt(sys.argv[1:], "hvc:V:p:P:",
                                   ["config=", "help", "version", "port=", "vnf-repository=", "adminport=",
                                    "log-socket-host=", "log-socket-port=", "log-file=", "create-tenant="])
        port = None
        port_admin = None
        config_file = 'openmanod.cfg'
        vnf_repository = None
        log_file = None
        log_socket_host = None
        log_socket_port = None
        create_tenant = None

        for o, a in opts:
            if o in ("-v", "--version"):
                print("openmanod version " + __version__ + ' ' + version_date)
                print("(c) Copyright Telefonica")
                sys.exit()
            elif o in ("-h", "--help"):
                usage()
                sys.exit()
            elif o in ("-V", "--vnf-repository"):
                vnf_repository = a
            elif o in ("-c", "--config"):
                config_file = a
            elif o in ("-p", "--port"):
                port = a
            elif o in ("-P", "--adminport"):
                port_admin = a
            elif o == "--log-socket-port":
                log_socket_port = a
            elif o == "--log-socket-host":
                log_socket_host = a
            elif o == "--log-file":
                log_file = a
            elif o == "--create-tenant":
                create_tenant = a
            else:
                assert False, "Unhandled option"
        if log_file:
            set_logging_file(log_file)
        global_config = load_configuration(config_file)
        global_config["version"] = __version__
        global_config["version_date"] = version_date
        # Command-line parameters override the configuration file
        if port:
            global_config['http_port'] = port
        if port_admin:
            global_config['http_admin_port'] = port_admin
        if log_socket_host:
            global_config['log_socket_host'] = log_socket_host
        if log_socket_port:
            global_config['log_socket_port'] = log_socket_port

        # override with ENV
        for env_k, env_v in environ.items():
            try:
                if not env_k.startswith("RO_") or env_k not in env2config or not env_v:
                    continue
                global_config[env2config[env_k]] = env_v
                if env_k.endswith("PORT"):  # convert to int, skip if not possible
                    global_config[env2config[env_k]] = int(env_v)
            except Exception as e:
                logger.warning("skipping environ '{}={}' because exception '{}'".format(env_k, env_v, e))

        global_config["console_port_iterator"] = console_port_iterator
        global_config["console_thread"] = {}
        global_config["console_ports"] = {}
        if not global_config["http_console_host"]:
            global_config["http_console_host"] = global_config["http_host"]
            if global_config["http_host"] == "0.0.0.0":
                global_config["http_console_host"] = socket.gethostname()

        # Configure logging STEP 2: optional socket logging
        # BUGFIX: the key tested here used to be "log_host", which nothing sets
        # (CLI option, ENV map and configuration file all use "log_socket_host"),
        # so the socket handler could never be activated
        if "log_socket_host" in global_config:
            socket_handler = log_handlers.SocketHandler(global_config["log_socket_host"],
                                                        global_config["log_socket_port"])
            socket_handler.setFormatter(log_formatter_complete)
            if global_config.get("log_socket_level") \
                    and global_config["log_socket_level"] != global_config["log_level"]:
                socket_handler.setLevel(global_config["log_socket_level"])
            logger.addHandler(socket_handler)

        if log_file:
            global_config['log_file'] = log_file
        elif global_config.get('log_file'):
            set_logging_file(global_config['log_file'])

        logger.setLevel(getattr(logging, global_config['log_level']))
        logger.critical("Starting openmano server version: '%s %s' command: '%s'",
                        __version__, version_date, " ".join(sys.argv))

        # per-module loggers (openmano.nfvo, openmano.http, ...) with optional
        # individual levels and files from the configuration
        for log_module in ("nfvo", "http", "vim", "wim", "db", "console", "ovim"):
            log_level_module = "log_level_" + log_module
            log_file_module = "log_file_" + log_module
            logger_module = logging.getLogger('openmano.' + log_module)
            if log_level_module in global_config:
                logger_module.setLevel(global_config[log_level_module])
            if log_file_module in global_config:
                try:
                    file_handler = logging.handlers.RotatingFileHandler(global_config[log_file_module],
                                                                        maxBytes=100e6, backupCount=9, delay=0)
                    file_handler.setFormatter(log_formatter_simple)
                    logger_module.addHandler(file_handler)
                except IOError as e:
                    raise LoadConfigurationException(
                        "Cannot open logging file '{}': {}. Check folder exist and permissions".format(
                            global_config[log_file_module], str(e)))
            global_config["logger_" + log_module] = logger_module

        # Initialize DB connection and check the expected schema version
        mydb = nfvo_db.nfvo_db()
        mydb.connect(global_config['db_host'], global_config['db_user'], global_config['db_passwd'],
                     global_config['db_name'])
        db_path = osm_ro.__path__[0] + "/database_utils"
        if not os_path.exists(db_path + "/migrate_mano_db.sh"):
            db_path = osm_ro.__path__[0] + "/../database_utils"
        try:
            r = mydb.get_db_version()
            if r[0] != database_version:
                logger.critical("DATABASE wrong version '{current}'. Try to upgrade/downgrade to version '{target}'"
                                " with '{db_path}/migrate_mano_db.sh {target}'".format(current=r[0],
                                                                                       target=database_version,
                                                                                       db_path=db_path))
                exit(-1)
        except db_base_Exception as e:
            logger.critical("DATABASE is not valid. If you think it is corrupted, you can init it with"
                            " '{db_path}/init_mano_db.sh' script".format(db_path=db_path))
            exit(-1)

        nfvo.global_config = global_config
        if create_tenant:
            try:
                nfvo.new_tenant(mydb, {"name": create_tenant})
            except Exception as e:
                if isinstance(e, nfvo.NfvoException) and e.http_code == 409:
                    pass  # if tenant exists (NfvoException error 409), ignore
                else:  # otherwise print an error and continue
                    logger.error("Cannot create tenant '{}': {}".format(create_tenant, e))

        # WIM module
        wim_persistence = WimPersistence(mydb)
        wim_engine = WimEngine(wim_persistence)
        # ---
        nfvo.start_service(mydb, wim_persistence, wim_engine)

        httpthread = httpserver.httpserver(
            mydb, False,
            global_config['http_host'], global_config['http_port'],
            wim_persistence, wim_engine
        )

        httpthread.start()
        if 'http_admin_port' in global_config:
            httpthreadadmin = httpserver.httpserver(mydb, True, global_config['http_host'],
                                                    global_config['http_admin_port'])
            httpthreadadmin.start()
        time.sleep(1)
        logger.info('Waiting for http clients')
        print('Waiting for http clients')
        print('openmanod ready')
        print('====================')
        time.sleep(20)
        sys.stdout.flush()

        # TODO: Interactive console must be implemented here instead of join or sleep

        # httpthread.join()
        # if 'http_admin_port' in global_config:
        #    httpthreadadmin.join()
        while True:
            time.sleep(86400)

    except KeyboardInterrupt as e:
        logger.info(str(e))
    except SystemExit:
        pass
    except getopt.GetoptError as e:
        logger.critical(str(e))  # will print something like "option -a not recognized"
        exit(-1)
    except LoadConfigurationException as e:
        logger.critical(str(e))
        exit(-1)
    except db_base_Exception as e:
        logger.critical(str(e))
        exit(-1)
    except nfvo.NfvoException as e:
        logger.critical(str(e), exc_info=True)
        exit(-1)
    nfvo.stop_service()
    if httpthread:
        httpthread.join(1)
diff --git a/RO/osm_ro/osm-ro.service b/RO/osm_ro/osm-ro.service
new file mode 100644 (file)
index 0000000..2246885
--- /dev/null
@@ -0,0 +1,11 @@
+[Unit]
+Description=openmano server (OSM RO)
+After=mysql.service
+
+[Service]
+ExecStart=/usr/bin/openmanod -c /etc/osm/openmanod.cfg --log-file=/var/log/osm/openmano.log
+Restart=always
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/RO/osm_ro/scripts/RO-of b/RO/osm_ro/scripts/RO-of
new file mode 100755 (executable)
index 0000000..1e5c917
--- /dev/null
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+##
+
+# author: Alfonso Tierno
+
+# Reads the credentials of an SDN (openflow) controller from the ovim database
+# and runs openflow-lib against it with those credentials exported in the env.
+# to get docker id that is running:
+# $ docker_ro=`docker service ps osm_ro -f desired-state=running --format "{{.Name}}.{{.ID}}" --no-trunc`
+# exec with:
+# $ docker exec -ti $docker_ro RO-of
+
+# Print column $1 of table $2 for the row whose name or uuid is $3.
+# Connects to the OVIM database host (RO_DB_OVIM_HOST, defaulted below), not
+# RO_DB_HOST, so a split-host deployment keeps working.
+function get_from_db()
+{
+    echo  "select $1 from $2 where name='$3' or uuid='$3';" | mysql -h"$RO_DB_OVIM_HOST" -u"$RO_DB_OVIM_USER" -p"$RO_DB_OVIM_PASSWORD" "$RO_DB_OVIM_NAME"  2>/dev/null | tail -n1
+}
+
+[ -z "$RO_DB_OVIM_HOST" ] && export RO_DB_OVIM_HOST="$RO_DB_HOST"
+
+if [ -z "$1" ] ; then
+    echo "usage '$0 <sdn_controller> command'"
+    echo
+    echo "available sdn_controllers are:"
+    # listing also comes from the OVIM database, so use the OVIM host
+    echo  "select uuid, name, type, ip, dpid, status from ofcs;" | mysql -h"$RO_DB_OVIM_HOST" -u"$RO_DB_OVIM_USER" -p"$RO_DB_OVIM_PASSWORD" "$RO_DB_OVIM_NAME"  2>/dev/null
+    exit
+fi
+
+
+export OF_CONTROLLER_DPID=`get_from_db dpid ofcs $1`
+[ -z "$OF_CONTROLLER_DPID" ] && echo "Cannot find sdn_controller '$1' at database" >&2 && exit 1
+
+export OF_CONTROLLER_IP=`get_from_db ip ofcs $1`
+export OF_CONTROLLER_PORT=`get_from_db port ofcs $1`
+export OF_CONTROLLER_USER=`get_from_db user ofcs $1`
+export OF_CONTROLLER_PASSWORD=`get_from_db password ofcs $1`
+export OF_CONTROLLER_TYPE=`get_from_db type ofcs $1`
+
+shift
+openflow-lib "$@"
+
+
+
diff --git a/RO/osm_ro/scripts/RO-start.sh b/RO/osm_ro/scripts/RO-start.sh
new file mode 100755 (executable)
index 0000000..1a8750d
--- /dev/null
@@ -0,0 +1,145 @@
+#!/bin/bash
+
+# This script is intended for launching RO from a docker container.
+# It waits for mysql server ready, normally running on a separate container, ...
+# then it checks if database is present and creates it if needed.
+# Finally it launches RO server.
+
+[ -z "$RO_DB_OVIM_HOST" ] && export RO_DB_OVIM_HOST="$RO_DB_HOST"
+[ -z "$RO_DB_OVIM_ROOT_PASSWORD" ] && export RO_DB_OVIM_ROOT_PASSWORD="$RO_DB_ROOT_PASSWORD"
+
+# Return 0 only when database $5 exists on $1:$2 AND its schema_version table
+# reports at least version $6 with a finished init; 1 otherwise.
+function is_db_created() {
+    db_host=$1
+    db_port=$2
+    db_user=$3
+    db_pswd=$4
+    db_name=$5
+    db_version=$6  # minimum database version
+
+    if mysqlshow -h"$db_host" -P"$db_port" -u"$db_user" -p"$db_pswd" | grep -v Wildcard | grep -q -e "$db_name" ; then
+        # version_int=0 tagged "init" means a previous initialization aborted half-way
+        if echo "SELECT comments FROM schema_version WHERE version_int=0;" |
+                mysql -h"$db_host" -P"$db_port" -u"$db_user" -p"$db_pswd" "$db_name" |
+                grep -q -e "init" ; then
+            echo " DB $db_name exists BUT failed in previous init" >&2
+            return 1
+        elif echo "SELECT * FROM schema_version WHERE version_int=$db_version;" |
+                mysql -h"$db_host" -P"$db_port" -u"$db_user" -p"$db_pswd" "$db_name" |
+                grep -q -e "$db_version" ; then
+            echo " DB $db_name exists and inited" >&2
+            return 0
+        else
+            echo " DB $db_name exists BUT not inited" >&2
+            return 1
+        fi
+    fi
+    echo " DB $db_name does not exist" >&2
+    return 1
+}
+
+# Rewrite the DB connection settings of /etc/osm/openmanod.cfg from the RO_DB_*
+# environment. Kept for reference: no longer called (env overrides the file).
+function configure(){
+    #Database parameters
+    #db_host:   localhost
+    #db_user:   mano
+    #db_passwd: manopw
+    #db_name:   mano_db
+    # Database ovim parameters
+    #db_ovim_host:   localhost          # by default localhost
+    #db_ovim_user:   mano               # DB user
+    #db_ovim_passwd: manopw             # DB password
+    #db_ovim_name:   mano_vim_db        # Name of the OVIM MANO DB
+
+
+    sed -i "s/^db_host:.*/db_host: $RO_DB_HOST/" /etc/osm/openmanod.cfg || return 1
+    sed -i "s/^db_user:.*/db_user: $RO_DB_USER/" /etc/osm/openmanod.cfg || return 1
+    sed -i "s/^db_passwd:.*/db_passwd: $RO_DB_PASSWORD/" /etc/osm/openmanod.cfg || return 1
+    sed -i "s/^db_name:.*/db_name: $RO_DB_NAME/" /etc/osm/openmanod.cfg || return 1
+    sed -i "s/^db_ovim_host:.*/db_ovim_host: $RO_DB_OVIM_HOST/" /etc/osm/openmanod.cfg || return 1
+    sed -i "s/^db_ovim_user:.*/db_ovim_user: $RO_DB_OVIM_USER/" /etc/osm/openmanod.cfg || return 1
+    sed -i "s/^db_ovim_passwd:.*/db_ovim_passwd: $RO_DB_OVIM_PASSWORD/" /etc/osm/openmanod.cfg || return 1
+    sed -i "s/^db_ovim_name:.*/db_ovim_name: $RO_DB_OVIM_NAME/" /etc/osm/openmanod.cfg || return 1
+    return 0
+}
+
+max_attempts=120
+# Poll "mysqladmin ping" once per second until the server $1:$2 answers, or
+# fail after $max_attempts seconds.
+function wait_db(){
+    db_host=$1
+    db_port=$2
+    attempt=0
+    echo "Wait until $max_attempts seconds for MySQL mano Server ${db_host}:${db_port} "
+    while ! mysqladmin ping -h"$db_host" -P"$db_port" --silent; do
+        #wait 120 sec
+        if [ $attempt -ge $max_attempts ]; then
+            echo
+            echo "Cannot connect to database ${db_host}:${db_port} during $max_attempts sec" >&2
+            return 1
+        fi
+        attempt=$((attempt+1))   # POSIX arithmetic; the old $[...] form is deprecated
+        echo -n "."
+        sleep 1
+    done
+    return 0
+}
+
+
+echo "1/4 Apply config"
+# this is not needed anymore because the environment overwrites the config file
+# configure || exit 1
+
+
+echo "2/4 Wait for db up"
+wait_db "$RO_DB_HOST" "$RO_DB_PORT" || exit 1
+[ "$RO_DB_OVIM_HOST" = "$RO_DB_HOST" ] ||  wait_db "$RO_DB_OVIM_HOST" "$RO_DB_OVIM_PORT" || exit 1
+
+
+echo "3/4 Init database"
+RO_PATH=`python3 -c 'import osm_ro; print(osm_ro.__path__[0])'`
+echo "RO_PATH: $RO_PATH"
+if ! is_db_created "$RO_DB_HOST" "$RO_DB_PORT" "$RO_DB_USER" "$RO_DB_PASSWORD" "$RO_DB_NAME" "27"
+then
+    if [ -n "$RO_DB_ROOT_PASSWORD" ] ; then
+        mysqladmin -h"$RO_DB_HOST" -uroot -p"$RO_DB_ROOT_PASSWORD" create "$RO_DB_NAME"
+        echo "CREATE USER '${RO_DB_USER}'@'%' IDENTIFIED BY '${RO_DB_PASSWORD}';" |
+            mysql -h"$RO_DB_HOST" -uroot -p"$RO_DB_ROOT_PASSWORD" || echo "user ${RO_DB_USER} already created?"
+        echo "GRANT ALL PRIVILEGES ON ${RO_DB_NAME}.* TO '${RO_DB_USER}'@'%';" |
+            mysql -h"$RO_DB_HOST" -uroot -p"$RO_DB_ROOT_PASSWORD"  || echo "user ${RO_DB_USER} already granted?"
+    fi
+    ${RO_PATH}/database_utils/init_mano_db.sh  -u "$RO_DB_USER" -p "$RO_DB_PASSWORD" -h "$RO_DB_HOST" \
+        -P "${RO_DB_PORT}" -d "${RO_DB_NAME}" || exit 1
+else
+    echo "  migrate database version"
+    ${RO_PATH}/database_utils/migrate_mano_db.sh -u "$RO_DB_USER" -p "$RO_DB_PASSWORD" -h "$RO_DB_HOST" \
+        -P "$RO_DB_PORT" -d "$RO_DB_NAME" -b /var/log/osm
+fi
+
+# TODO py3 BEGIN
+#OVIM_PATH=`python3 -c 'import lib_osm_openvim; print(lib_osm_openvim.__path__[0])'`
+#echo "OVIM_PATH: $OVIM_PATH"
+#if ! is_db_created "$RO_DB_OVIM_HOST" "$RO_DB_OVIM_PORT" "$RO_DB_OVIM_USER" "$RO_DB_OVIM_PASSWORD" "$RO_DB_OVIM_NAME" \
+#    "22"
+#then
+#    if [ -n "$RO_DB_OVIM_ROOT_PASSWORD" ] ; then
+#        mysqladmin -h"$RO_DB_OVIM_HOST" -uroot -p"$RO_DB_OVIM_ROOT_PASSWORD" create "$RO_DB_OVIM_NAME"
+#        echo "CREATE USER '${RO_DB_OVIM_USER}'@'%' IDENTIFIED BY '${RO_DB_OVIM_PASSWORD}';" |
+#            mysql -h"$RO_DB_OVIM_HOST" -uroot -p"$RO_DB_OVIM_ROOT_PASSWORD" ||
+#            echo "user ${RO_DB_OVIM_USER} already created?"
+#        echo "GRANT ALL PRIVILEGES ON ${RO_DB_OVIM_NAME}.* TO '${RO_DB_OVIM_USER}'@'%';" |
+#            mysql -h"$RO_DB_OVIM_HOST" -uroot -p"$RO_DB_OVIM_ROOT_PASSWORD"  ||
+#            echo "user ${RO_DB_OVIM_USER} already granted?"
+#    fi
+#    ${OVIM_PATH}/database_utils/init_vim_db.sh  -u "$RO_DB_OVIM_USER" -p "$RO_DB_OVIM_PASSWORD" -h "$RO_DB_OVIM_HOST" \
+#        -P "${RO_DB_OVIM_PORT}" -d "${RO_DB_OVIM_NAME}" || exit 1
+#else
+#    echo "  migrate database version"
+#    ${OVIM_PATH}/database_utils/migrate_vim_db.sh -u "$RO_DB_OVIM_USER" -p "$RO_DB_OVIM_PASSWORD" -h "$RO_DB_OVIM_HOST"\
+#        -P "$RO_DB_OVIM_PORT" -d "$RO_DB_OVIM_NAME" -b /var/log/osm
+#fi
+# TODO py3 END
+
+echo "4/4 Try to start"
+# look for openmanod.cfg
+RO_CONFIG_FILE="/etc/osm/openmanod.cfg"
+[ -f "$RO_CONFIG_FILE" ] || RO_CONFIG_FILE=$(python3 -c 'import osm_ro; print(osm_ro.__path__[0])')/openmanod.cfg
+[ -f "$RO_CONFIG_FILE" ] || ! echo "configuration file 'openmanod.cfg' not found" || exit 1
+
+python3 -m osm_ro.openmanod -c "$RO_CONFIG_FILE"  --create-tenant=osm  # --log-file=/var/log/osm/openmano.log
+
diff --git a/RO/osm_ro/scripts/get-options.sh b/RO/osm_ro/scripts/get-options.sh
new file mode 100644 (file)
index 0000000..8b2968e
--- /dev/null
@@ -0,0 +1,175 @@
+#!/bin/bash
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+
+#Utility for getting options, must be call with source
+#for every <option> it sets a variable 'option_<option>="-"' 
+#if the option appears more than once, it concatenates a "-"
+#if the option contains an argument: 'option_<option>="argument"'
+#if the long option name contains "-" they are converted to "_"
+#params that are not options are stored in 'params'
+#the options to look for is received in the first argument, 
+#a blank separator list with short and long options without the leading - or --
+#options to be stored in the same variable must appear in the same word separated by ':'
+#insert a trailing = if the option requires an argument
+#insert a trailing ? if the option may have an argument NOT IMPLEMENTED
+#option -- means get the rest of argument returned as 'option__=$*'
+
+#example: to allow options -h --help -j -k(with argument) --my-long-option(with argument)
+# and other parameters after -- provide
+#     "help:h j k= my-long-option="
+#parsing "-h -karg pepe --my-long-option=otherar -- -s" will set variables
+#       option_help="-"
+#       option_k="arg"
+#       option_my_long_option="otherarg"
+#       params=" pepe"
+#       option__="-s"
+
+
+#detect if is called with a source to use the 'exit'/'return' command for exiting
+[[ ${BASH_SOURCE[0]} != $0 ]] && ___exit="return" || ___exit="exit"
+
+options="$1"
+shift
+
+#get_argument holds the name of the option_* variable waiting for the next
+#word to be consumed as its argument ("" when no argument is pending)
+get_argument=""
+#reset variables
+params=""
+for option_group in $options
+do
+    #first token of the group (before any ':' alias or trailing '=') names the variable
+    _name=${option_group%%:*}
+    _name=${_name%=}
+    _name=${_name//-/_}
+    eval option_${_name}='""'
+done
+
+while [[ $# -gt 0 ]]
+do
+    argument="$1"
+    shift
+    if [[ -n $get_argument ]]
+    then
+        [[ ${argument:0:1} == "-" ]] && echo "option '-$option' requires an argument"  >&2 && $___exit 1
+        eval ${get_argument}='"$argument"'
+        #echo option $get_argument with argument
+        get_argument=""
+        continue
+    fi
+
+
+    #short options
+    if [[ ${argument:0:1} == "-" ]] && [[ ${argument:1:1} != "-" ]] && [[ ${#argument} -ge 2 ]]
+    then
+        #walk every letter of the bundle (e.g. -abc); index 0 is the leading '-'
+        index=0
+        while index=$((index+1)) && [[ $index -lt ${#argument} ]]
+        do
+            option=${argument:$index:1}
+            bad_option=y
+            for option_group in $options
+            do
+                _name=""
+                for o in $(echo $option_group | tr ":=" " ")
+                do
+                    [[ -z "$_name" ]] && _name=${o//-/_}
+                    #echo option $option versus $o
+                    if [[ "$option" == "${o}" ]]
+                    then
+                        eval option_${_name}='${option_'${_name}'}-'
+                        bad_option=n
+                        if [[ ${option_group:${#option_group}-1} != "=" ]]
+                        then
+                            continue
+                        fi 
+                        #option takes an argument: either the rest of this word or the next word
+                        if [[ ${#argument} -gt $((index+1)) ]]
+                        then
+                            eval option_${_name}='"${argument:$((index+1))}"'
+                            index=${#argument}
+                        else
+                            get_argument=option_${_name}
+                            #echo next should be argument $argument
+                        fi
+    
+                        break
+                    fi
+                done
+            done
+            [[ $bad_option == y ]] && echo "invalid argument '-$option'?  Type -h for help" >&2 && $___exit 1
+        done
+    #long options: --name or --name=value
+    elif [[ ${argument:0:2} == "--" ]] && [[ ${#argument} -ge 3 ]]
+    then 
+        option=${argument:2}
+        option_argument=${option#*=}
+        option_name=${option%%=*}
+        [[ "$option_name" == "$option" ]] && option_argument=""
+        bad_option=y
+        for option_group in $options
+        do
+            _name=""
+            for o in $(echo $option_group | tr ":=" " ")
+            do
+                [[ -z "$_name" ]] && _name=${o//-/_}
+                #echo option $option versus $o
+                if [[ "$option_name" == "${o}" ]]
+                then
+                    bad_option=n
+                    if [[ ${option_group:${#option_group}-1} != "=" ]] 
+                    then #not an argument
+                        [[ -n "${option_argument}" ]] && echo "option '--${option%%=*}' do not accept an argument " >&2 && $___exit 1
+                        eval option_${_name}='"${option_'${_name}'}-"'
+                    elif [[ -n "${option_argument}" ]]
+                    then
+                        eval option_${_name}='"${option_argument}"'
+                    else
+                        get_argument=option_${_name}
+                        #echo next should be argument $argument
+                    fi
+                    break
+                fi
+            done
+        done
+        [[ $bad_option == y ]] && echo "invalid argument '-$option'?  Type -h for help" >&2 && $___exit 1
+    #bare "--": remaining words are returned in option__ when "--" was declared
+    elif [[ ${argument:0:2} == "--" ]]
+    then
+        option__="$*"
+        bad_option=y
+        for o in $options
+        do
+            if [[ "$o" == "--" ]]
+            then
+                bad_option=n
+                option__=" $*"
+                break
+            fi
+        done
+        [[ $bad_option == y ]] && echo "invalid argument '--'?  Type -h for help" >&2 && $___exit 1
+        break
+    else
+        params="$params ${argument}"
+    fi
+
+done
+
+[[ -n "$get_argument" ]] && echo "option '-$option' requires an argument"  >&2 && $___exit 1
+$___exit 0
+#echo params $params
+
diff --git a/RO/osm_ro/scripts/install-lib-osm-openvim.sh b/RO/osm_ro/scripts/install-lib-osm-openvim.sh
new file mode 100755 (executable)
index 0000000..93df95e
--- /dev/null
@@ -0,0 +1,99 @@
+#!/bin/bash
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+##
+
+# author: Alfonso Tierno
+
+exit 0 # TODO py3   for the moment no openvim library is installed
+# NOTE: everything below this point is currently unreachable dead code, kept
+# for reference until the lib-osm-openvim py3 port re-enables this installer.
+
+# It uses following env, if not provided filling by default
+[ -z "$GIT_OVIM_URL" ] && GIT_OVIM_URL=https://osm.etsi.org/gerrit/osm/openvim.git
+[ -z "$DEVELOP" ] && DEVELOP=""
+# folder where RO is installed
+[ -z "$BASEFOLDER" ] && HERE=$(dirname $(readlink -f ${BASH_SOURCE[0]})) && BASEFOLDER=$(dirname $HERE)
+[ -z "$SUDO_USER" ] && SUDO_USER="$USER"
+[ -z "$NO_PACKAGES" ] && NO_PACKAGES=""
+[ -z "$_DISTRO" ] && _DISTRO="Ubuntu"
+
+
+function usage(){
+    echo -e "usage: sudo -E $0 [OPTIONS]"
+    echo -e "Install last stable source code of lib-osm-openvim and the needed packages"
+    echo -e "  OPTIONS"
+    echo -e "     -h --help:  show this help"
+    echo -e "     -b REFSPEC: install from source code using a specific branch (master, v2.0, ...) or tag"
+    echo -e "                    -b master          (main branch)"
+    echo -e "                    -b v2.0            (v2.0 branch)"
+    echo -e "                    -b tags/v1.1.0     (a specific tag)"
+    echo -e "                    ..."
+    echo -e "     --develop:  install last master version for developers"
+    echo -e "     --no-install-packages: use this option to skip updating and installing the requires packages. This" \
+            "avoid wasting time if you are sure requires packages are present e.g. because of a previous installation"
+}
+# Parse -b/-h short options and the supported "--" long options
+while getopts ":b:h-:" o; do
+    case "${o}" in
+        b)
+            export COMMIT_ID=${OPTARG}
+            ;;
+        h)
+            usage && exit 0
+            ;;
+        -)
+            [ "${OPTARG}" == "help" ] && usage && exit 0
+            [ "${OPTARG}" == "develop" ] && export DEVELOP="y" && continue
+            [ "${OPTARG}" == "quiet" ] && export QUIET_MODE=yes && export DEBIAN_FRONTEND=noninteractive && continue
+            [ "${OPTARG}" == "no-install-packages" ] && export NO_PACKAGES=yes && continue
+            echo -e "Invalid option: '--$OPTARG'\nTry $0 --help for more information" >&2
+            exit 1
+            ;;
+        \?)
+            echo -e "Invalid option: '-$OPTARG'\nTry $0 --help for more information" >&2
+            exit 1
+            ;;
+        :)
+            echo -e "Option '-$OPTARG' requires an argument\nTry $0 --help for more information" >&2
+            exit 1
+            ;;
+        *)
+            usage >&2
+            exit 1
+            ;;
+    esac
+done
+
+# Clone openvim and check out the requested refspec (explicit -b, latest
+# stable tag by default, or master when --develop)
+su $SUDO_USER -c "git -C '${BASEFOLDER}' clone ${GIT_OVIM_URL} lib-openvim" ||
+    ! echo "Error cannot clone from '${GIT_OVIM_URL}'" >&2 || exit 1
+if [[ -n $COMMIT_ID ]] ; then
+    echo -e "Installing lib-osm-openvim from refspec: $COMMIT_ID"
+    su $SUDO_USER -c "git -C '${BASEFOLDER}/lib-openvim' checkout $COMMIT_ID" ||
+        ! echo "Error cannot checkout '$COMMIT_ID' from '${GIT_OVIM_URL}'" >&2 || exit 1
+elif [[ -z $DEVELOP ]]; then
+    LATEST_STABLE_TAG=`git -C "${BASEFOLDER}/lib-openvim" tag -l "v[0-9]*" | sort -V | tail -n1`
+    echo -e "Installing lib-osm-openvim from refspec: tags/${LATEST_STABLE_TAG}"
+    su $SUDO_USER -c "git -C '${BASEFOLDER}/lib-openvim' checkout tags/${LATEST_STABLE_TAG}" ||
+        ! echo "Error cannot checkout 'tags/${LATEST_STABLE_TAG}' from '${GIT_OVIM_URL}'" >&2 || exit 1
+else
+    echo -e "Installing lib-osm-openvim from refspec: master"
+fi
+
+# HACK: still installs with pip2 — revisit when this script is re-enabled for py3
+make -C "${BASEFOLDER}/lib-openvim" prepare_lite
+export LANG="en_US.UTF-8"
+pip2 install -e  "${BASEFOLDER}/lib-openvim/build" || ! echo "ERROR installing lib-osm-openvim library!!!" >&2  ||
+    exit 1
diff --git a/RO/osm_ro/scripts/install-openmano-service.sh b/RO/osm_ro/scripts/install-openmano-service.sh
new file mode 100755 (executable)
index 0000000..c19b345
--- /dev/null
@@ -0,0 +1,168 @@
+#!/bin/bash
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+#ONLY TESTED for Ubuntu 16.04
+#it configures openmano to run as a service
+
+function usage(){
+    echo -e "usage: sudo -E $0 [OPTIONS]"
+    echo -e "Configures openmano to run as a service at /opt"
+    echo -e "  OPTIONS"
+    echo -e "     -u USER_OWNER  user owner of the service, 'root' by default"
+    echo -e "     -f PATH  path where openmano source is located. If missing it is downloaded from git"
+    echo -e "     -d --delete:  if -f is provided, delete this path after copying to /opt"
+    echo -e "     -h:  show this help"
+    echo -e "     --uninstall: remove created service and files"
+}
+
+# Remove both the legacy (openmano) and current (osm-ro) services plus every
+# installed file/link; tolerant of partial installs so it is idempotent.
+function uninstall(){
+    echo "systemctl disable openmano.service " &&  systemctl disable openmano.service 2>/dev/null ||
+        echo "  Already done"
+    echo "systemctl disable osm-ro.service " &&  systemctl disable osm-ro.service 2>/dev/null ||
+        echo "  Already done"
+    echo "service openmano stop " && service openmano stop 2>/dev/null || echo "  Already done"
+    echo "service osm-ro stop " && service osm-ro stop 2>/dev/null || echo "  Already done"
+    for file in /opt/openmano /etc/default/openmanod.cfg /etc/osm/openmanod.cfg /var/log/openmano /var/log/osm/openmano* \
+        /etc/systemd/system/openmano.service /etc/systemd/system/osm-ro.service /usr/bin/openmano /usr/sbin/service-openmano \
+        /usr/bin/openmano-report
+    do
+        echo rm $file
+        rm -rf $file || ! echo "Can not delete '$file'. Needed root privileges?" >&2 || exit 1
+    done
+    echo "Done"
+}
+
+GIT_URL=https://osm.etsi.org/gerrit/osm/RO.git
+USER_OWNER="root"
+QUIET_MODE=""
+FILE=""
+DELETE=""
+while getopts ":u:f:hdq-:" o; do
+    case "${o}" in
+        u)
+            export USER_OWNER="$OPTARG"
+            ;;
+        f)
+            export FILE="$OPTARG"
+            ;;
+        q)
+            export QUIET_MODE=yes
+            ;;
+        h)
+            usage && exit 0
+            ;;
+        d)
+            DELETE=y
+            ;;
+        -)
+            [ "${OPTARG}" == "help" ] && usage && exit 0
+            [ "${OPTARG}" == "uninstall" ] && uninstall && exit 0
+            [ "${OPTARG}" == "delete" ] && DELETE=y && continue
+            echo -e "Invalid option: '--$OPTARG'\nTry $0 --help for more information" >&2
+            exit 1
+            ;;
+        \?)
+            echo -e "Invalid option: '-$OPTARG'\nTry $0 --help for more information" >&2
+            exit 1
+            ;;
+        :)
+            echo -e "Option '-$OPTARG' requires an argument\nTry $0 --help for more information" >&2
+            exit 1
+            ;;
+        *)
+            usage >&2
+            exit 1  # was "exit -1": non-portable, wraps to 255
+            ;;
+    esac
+done
+BAD_PATH_ERROR="Path '$FILE' does not contain a valid openmano distribution"
+
+#check root privileges
+[ "$USER" != "root" ] && echo "Needed root privileges" >&2 && exit 1
+
+#Discover Linux distribution
+#try redhat type
+if [[ -f /etc/redhat-release ]]
+then
+    _DISTRO=$(cat /etc/redhat-release 2>/dev/null | cut  -d" " -f1)
+else
+    #if not assuming ubuntu type
+    _DISTRO=$(lsb_release -is  2>/dev/null)
+fi
+if [[ "$_DISTRO" == "Ubuntu" ]]
+then
+    _RELEASE=$(lsb_release -rs)
+    if [[ ${_RELEASE%%.*} != 16 ]]
+    then
+        echo "Only tested in Ubuntu Server 16.04" >&2 && exit 1
+    fi
+else
+    echo "Only tested in Ubuntu Server 16.04" >&2 && exit 1
+fi
+
+
+if [[ -z "$FILE" ]]
+then
+    FILE=__temp__${RANDOM}
+    git clone $GIT_URL $FILE || ! echo "Cannot get openmano source code from $GIT_URL" >&2 || exit 1
+    DELETE=y
+else
+    [[ -d  "$FILE" ]] || ! echo $BAD_PATH_ERROR >&2 || exit 1
+fi
+
+#make idempotent
+uninstall
+#copy files
+cp -r "$FILE" /opt/openmano         || ! echo $BAD_PATH_ERROR >&2 || exit 1
+mkdir -p /etc/osm  || echo "warning cannot create config folder '/etc/osm'"
+cp /opt/openmano/osm_ro/openmanod.cfg /etc/osm/openmanod.cfg  ||
+    echo "warning cannot create file '/etc/osm/openmanod.cfg'"
+mkdir -p /var/log/osm  || echo "warning cannot create log folder '/var/log/osm'"
+#makes links
+ln -s -v /opt/openmano/openmano /usr/bin/openmano
+ln -s -v /opt/openmano/scripts/service-openmano /usr/sbin/service-openmano
+ln -s -v /opt/openmano/scripts/openmano-report /usr/bin/openmano-report
+
+chown -R $SUDO_USER /opt/openmano
+
+#install the systemd unit running the daemon as USER_OWNER
+mkdir -p /etc/systemd/system/
+cat  > /etc/systemd/system/osm-ro.service  << EOF
+[Unit]
+Description=openmano server
+
+[Service]
+User=${USER_OWNER}
+ExecStart=/opt/openmano/openmanod -c /etc/osm/openmanod.cfg --log-file=/var/log/osm/openmano.log
+Restart=always
+
+[Install]
+WantedBy=multi-user.target
+EOF
+
+[[ -n $DELETE ]] && rm -rf "${FILE}"
+
+service osm-ro start
+systemctl enable osm-ro.service
+
+echo Done
+exit
diff --git a/RO/osm_ro/scripts/install-openmano.sh b/RO/osm_ro/scripts/install-openmano.sh
new file mode 100755 (executable)
index 0000000..98d0905
--- /dev/null
@@ -0,0 +1,446 @@
+#!/bin/bash
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+#ONLY TESTED in Ubuntu 16.04   partially tested in Ubuntu 14.10 14.04 16.04, CentOS7 and RHEL7
+#Get needed packages, source code and configure to run openmano
+#Ask for database user and password if not provided
+
+function usage(){
+    echo -e "usage: sudo -E $0 [OPTIONS]"
+    echo -e "Install last stable source code in ./openmano and the needed packages"
+    echo -e "On a Ubuntu 16.04 it configures openmano as a service"
+    echo -e "  OPTIONS"
+    echo -e "     -u USER:    database admin user. 'root' by default. Prompts if needed"
+    echo -e "     -p PASS:    database admin password to be used or installed. Prompts if needed"
+    echo -e "     -q --quiet: install in unattended mode"
+    echo -e "     -h --help:  show this help"
+    echo -e "     -b REFSPEC: install from source code using a specific branch (master, v2.0, ...) or tag"
+    echo -e "                    -b master          (main RO branch)"
+    echo -e "                    -b v2.0            (v2.0 branch)"
+    echo -e "                    -b tags/v1.1.0     (a specific tag)"
+    echo -e "                    ..."
+    echo -e "     --develop:  install last version for developers, and do not configure as a service"
+    echo -e "     --forcedb:  reinstall mano_db DB, deleting previous database if exists and creating a new one"
+    echo -e "     --updatedb: do not reinstall mano_db DB if it exists, just update database"
+    echo -e "     --force:    makes idenpotent, delete previous installations folders if needed. It assumes --updatedb if --forcedb option is not provided"
+    echo -e "     --noclone:  assumes that openmano was cloned previously and that this script is run from the local repo"
+    echo -e "     --no-install-packages: use this option to skip updating and installing the requires packages. This avoid wasting time if you are sure requires packages are present e.g. because of a previous installation"
+    echo -e "     --no-db: do not install mysql server"
+}
+
+function install_packages(){
+    [ -x /usr/bin/apt-get ] && apt-get install -y $*
+    [ -x /usr/bin/yum ]     && yum install     -y $*   
+    
+    #check properly installed
+    for PACKAGE in $*
+    do
+        PACKAGE_INSTALLED="no"
+        [ -x /usr/bin/apt-get ] && dpkg -l $PACKAGE            &>> /dev/null && PACKAGE_INSTALLED="yes"
+        [ -x /usr/bin/yum ]     && yum list installed $PACKAGE &>> /dev/null && PACKAGE_INSTALLED="yes" 
+        if [ "$PACKAGE_INSTALLED" = "no" ]
+        then
+            echo "failed to install package '$PACKAGE'. Revise network connectivity and try again" >&2
+            exit 1
+       fi
+    done
+}
+
+function ask_user(){
+    # ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
+    # Params: $1 text to ask;   $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
+    # Return: true(0) if user type 'yes'; false (1) if user type 'no'
+    read -e -p "$1" USER_CONFIRMATION
+    while true ; do
+        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
+        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
+        [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
+        [ "${USER_CONFIRMATION,,}" == "no" ]  || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
+        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
+    done
+}
+
+GIT_URL=https://osm.etsi.org/gerrit/osm/RO.git
+# exported so the helper scripts invoked later (install-osm-im.sh, ...) inherit them
+export GIT_OVIM_URL=https://osm.etsi.org/gerrit/osm/openvim.git
+export GIT_OSMIM_URL=https://osm.etsi.org/gerrit/osm/IM.git
+# Defaults for the command line options parsed below
+DBUSER="root"
+DBPASSWD=""
+DBPASSWD_PARAM=""
+QUIET_MODE=""
+DEVELOP=""
+DB_FORCE_UPDATE=""
+UPDATEDB=""
+FORCE=""
+NOCLONE=""
+NO_PACKAGES=""
+NO_DB=""
+COMMIT_ID=""
+
+# Parse short options with getopts; long options arrive as '-' with the long
+# name in OPTARG and are matched explicitly inside the '-' case
+while getopts ":u:p:b:hiq-:" o; do
+    case "${o}" in
+        u)
+            export DBUSER="$OPTARG"
+            ;;
+        p)
+            export DBPASSWD="$OPTARG"
+            export DBPASSWD_PARAM="-p$OPTARG"
+            ;;
+        b)
+            export COMMIT_ID=${OPTARG}
+            ;;
+        q)
+            export QUIET_MODE=yes
+            export DEBIAN_FRONTEND=noninteractive
+            ;;
+        h)
+            usage && exit 0
+            ;;
+        -)
+            [ "${OPTARG}" == "help" ] && usage && exit 0
+            [ "${OPTARG}" == "develop" ] && export DEVELOP="y" && continue
+            [ "${OPTARG}" == "forcedb" ] && DB_FORCE_UPDATE="${DB_FORCE_UPDATE}--forcedb" && continue
+            [ "${OPTARG}" == "updatedb" ] && DB_FORCE_UPDATE="${DB_FORCE_UPDATE}--updatedb" && continue
+            [ "${OPTARG}" == "force" ]   &&  FORCE="y" && continue
+            [ "${OPTARG}" == "noclone" ] && NOCLONE="y" && continue
+            [ "${OPTARG}" == "quiet" ] && export QUIET_MODE=yes && export DEBIAN_FRONTEND=noninteractive && continue
+            [ "${OPTARG}" == "no-install-packages" ] && export NO_PACKAGES=yes && continue
+            [ "${OPTARG}" == "no-db" ] && NO_DB="y" && continue
+            echo -e "Invalid option: '--$OPTARG'\nTry $0 --help for more information" >&2
+            exit 1
+            ;;
+        \?)
+            echo -e "Invalid option: '-$OPTARG'\nTry $0 --help for more information" >&2
+            exit 1
+            ;;
+        :)
+            echo -e "Option '-$OPTARG' requires an argument\nTry $0 --help for more information" >&2
+            exit 1
+            ;;
+        *)
+            usage >&2
+            exit 1
+            ;;
+    esac
+done
+
+# --forcedb and --updatedb are mutually exclusive (they concatenate into
+# DB_FORCE_UPDATE above, so both orders must be rejected); --force alone
+# implies --updatedb
+if [ "$DB_FORCE_UPDATE" == "--forcedb--updatedb" ] || [ "$DB_FORCE_UPDATE" == "--updatedb--forcedb" ] ; then
+    echo "Error: options --forcedb and --updatedb are mutually exclusive" >&2
+    exit 1
+elif [ -n "$FORCE" ] && [ -z "$DB_FORCE_UPDATE" ] ; then
+    DB_FORCE_UPDATE="--updatedb"
+fi
+
+#check root privileges and non a root user behind
+[ "$USER" != "root" ] && echo "Needed root privileges" >&2 && exit 1
+# If not invoked through sudo (or sudo as root), confirm installing for root
+if [[ -z "$SUDO_USER" ]] || [[ "$SUDO_USER" = "root" ]]
+then
+    [[ -z $QUIET_MODE ]] && ! ask_user "Install in the root user (y/N)? " n  && echo "Cancelled" && exit 1
+    export SUDO_USER=root
+fi
+
+# Discover Linux distribution
+# try redhat type
+[ -f /etc/redhat-release ] && _DISTRO=$(cat /etc/redhat-release 2>/dev/null | cut  -d" " -f1)
+# if not assuming ubuntu type
+[ -f /etc/redhat-release ] || _DISTRO=$(lsb_release -is  2>/dev/null)
+if [ "$_DISTRO" == "Ubuntu" ]
+then
+    _RELEASE=$(lsb_release -rs)
+    if [[ ${_RELEASE%%.*} != 14 ]] && [[ ${_RELEASE%%.*} != 16 ]]
+    then
+        [[ -z $QUIET_MODE ]] &&
+            ! ask_user "WARNING! Not tested Ubuntu version. Continue assuming a trusty (14.XX)' (y/N)? " n &&
+            echo "Cancelled" && exit 1
+        # BUGFIX: this was '_RELEASE = 14', which bash parses as running a
+        # command named '_RELEASE' (not an assignment), so the trusty
+        # fallback never took effect. Assign without spaces.
+        _RELEASE=14
+    fi
+elif [ "$_DISTRO" == "CentOS" ]
+then
+    _RELEASE="7"
+    if ! cat /etc/redhat-release | grep -q "7."
+    then
+        [[ -z $QUIET_MODE ]] &&
+            ! ask_user "WARNING! Not tested CentOS version. Continue assuming a '$_RELEASE' type (y/N)? " n &&
+            echo "Cancelled" && exit 1
+    fi
+elif [ "$_DISTRO" == "Red" ]
+then
+    _RELEASE="7"
+    if ! cat /etc/redhat-release | grep -q "7."
+    then
+        [[ -z $QUIET_MODE ]] &&
+            ! ask_user "WARNING! Not tested Red Hat OS version. Continue assuming a '$_RELEASE' type (y/N)? " n &&
+            echo "Cancelled" && exit 1
+    fi
+else  #[ "$_DISTRO" != "Ubuntu" -a "$_DISTRO" != "CentOS" -a "$_DISTRO" != "Red" ] 
+    # Unknown distro: guess from the available package manager and confirm
+    _DISTRO_DISCOVER=$_DISTRO
+    [ -x /usr/bin/apt-get ] && _DISTRO="Ubuntu" && _RELEASE="14"
+    [ -x /usr/bin/yum ]     && _DISTRO="CentOS" && _RELEASE="7"
+    [[ -z $QUIET_MODE ]] &&
+        ! ask_user "WARNING! Not tested Linux distribution '$_DISTRO_DISCOVER '. Continue assuming a '$_DISTRO $_RELEASE' type (y/N)? " n &&
+        echo "Cancelled" && exit 1
+fi
+
+export _DISTRO="$_DISTRO"
+#check if installed as a service
+# Install as a systemd service only on Ubuntu 16.x and when not in develop mode
+INSTALL_AS_A_SERVICE=""
+[[ "$_DISTRO" == "Ubuntu" ]] &&  [[ ${_RELEASE%%.*} == 16 ]] && [[ -z $DEVELOP ]] && INSTALL_AS_A_SERVICE="y"
+
+# Next operations require knowing BASEFOLDER
+if [[ -z "$NOCLONE" ]]; then
+    if [[ -n "$INSTALL_AS_A_SERVICE" ]] ; then
+        # temporary random folder; the service installer copies it elsewhere
+        export BASEFOLDER=__openmano__${RANDOM}
+    else
+        export BASEFOLDER="${PWD}/openmano"
+    fi
+    [[ -n "$FORCE" ]] && rm -rf $BASEFOLDER #make idempotent
+else
+    # --noclone: assume this script runs from inside the local repo checkout
+    HERE=$(dirname $(readlink -f ${BASH_SOURCE[0]}))
+    export BASEFOLDER=$(dirname $HERE)
+fi
+
+if [[ -z "$NO_PACKAGES" ]]
+then
+    echo -e "\n"\
+        "#################################################################\n"\
+        "#####        UPDATE REPOSITORIES                            #####\n"\
+        "#################################################################"
+    # Ubuntu: add the OpenStack queens cloud archive for the client packages
+    [ "$_DISTRO" == "Ubuntu" ] && apt-get update -y &&
+        add-apt-repository -y cloud-archive:queens && apt-get update -y
+
+    # RedHat family: enable EPEL, needed for several python packages below
+    [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && yum check-update -y
+    [ "$_DISTRO" == "CentOS" ] && yum install -y epel-release
+    [ "$_DISTRO" == "Red" ] && wget http://dl.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-5.noarch.rpm \
+        && rpm -ivh epel-release-7-5.noarch.rpm && yum install -y epel-release && rm -f epel-release-7-5.noarch.rpm
+    [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && yum repolist
+
+    echo -e "\n"\
+        "#################################################################\n"\
+        "#####        INSTALL REQUIRED PACKAGES                      #####\n"\
+        "#################################################################"
+    [ "$_DISTRO" == "Ubuntu" ] && install_packages "git make screen wget mysql-client"
+    [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && install_packages "git make screen wget mariadb-client"
+
+    echo -e "\n"\
+        "#################################################################\n"\
+        "#####        INSTALL PYTHON PACKAGES                        #####\n"\
+        "#################################################################"
+    [ "$_DISTRO" == "Ubuntu" ] && install_packages "python-yaml python-bottle python-mysqldb python-jsonschema "\
+        "python-paramiko python-argcomplete python-requests python-logutils libxml2-dev libxslt-dev python-dev "\
+        "python-pip python-crypto python-networkx"
+    [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && install_packages "PyYAML MySQL-python python-jsonschema "\
+        "python-paramiko python-argcomplete python-requests python-logutils libxslt-devel libxml2-devel python-devel "\
+        "python-pip python-crypto python-networkx"
+    # The only way to install python-bottle on Centos7 is with easy_install or pip
+    [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && easy_install -U bottle
+
+    # required for vmware connector TODO move that to separete opt in install script
+    pip2 install pip==9.0.3 || exit 1   #  --upgrade pip    install pip 10 that does not work
+    pip2 install pyvcloud==19.1.1 || exit 1
+    pip2 install progressbar || exit 1
+    pip2 install prettytable || exit 1
+    pip2 install pyvmomi || exit 1
+    [ "$_DISTRO" == "Ubuntu" ] && install_packages "genisoimage"
+    [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && install_packages "genisoimage"
+
+    # required for fog connector
+    pip2 install fog05rest || exit 1
+
+    # required for OpenNebula connector
+    pip2 install untangle || exit 1
+    pip2 install pyone || exit 1
+    pip2 install -e git+https://github.com/python-oca/python-oca#egg=oca || exit 1
+
+    # required for AWS connector
+    [ "$_DISTRO" == "Ubuntu" ] && install_packages "python-boto"
+    [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && install_packages "python-boto"  #TODO check if at Centos it exists with this name, or PIP should be used
+
+    # install openstack client needed for using openstack as a VIM
+    [ "$_DISTRO" == "Ubuntu" ] && install_packages "python-novaclient python-keystoneclient python-glanceclient "\
+                                                   "python-neutronclient python-cinderclient python-openstackclient "\
+                                                   "python-networking-l2gw"
+    [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && install_packages "python-devel" && easy_install \
+        python-novaclient python-keystoneclient python-glanceclient python-neutronclient python-cinderclient \
+        python-openstackclient python-networking-l2gw #TODO revise if gcc python-pip is needed
+
+    # required for Azure
+    # NOTE(review): unlike the other pip2 installs above, this one does not
+    # abort on failure ('|| exit 1') — confirm whether that is intentional
+    pip2 install azure
+
+fi  # [[ -z "$NO_PACKAGES" ]]
+
+if [[ -z $NOCLONE ]]; then
+    echo -e "\n"\
+        "#################################################################\n"\
+        "#####        DOWNLOAD SOURCE                                #####\n"\
+        "#################################################################"
+    # Refuse to reuse an existing target folder unless --force was given or
+    # the user confirms interactively; --quiet without --force aborts
+    if [[ -d "${BASEFOLDER}" ]] ; then
+        if [[ -n "$FORCE" ]] ; then
+            echo "deleting '${BASEFOLDER}' folder"
+            rm -rf "$BASEFOLDER" #make idempotent
+        elif [[ -z "$QUIET_MODE" ]] ; then
+            ! ask_user "folder '${BASEFOLDER}' exists, overwrite (y/N)? " n && echo "Cancelled!" && exit 1
+            rm -rf "$BASEFOLDER"
+        else
+            echo "'${BASEFOLDER}' folder exists. Use "--force" to overwrite" >&2 && exit 1
+        fi
+    fi
+    # Clone as the invoking (non-root) user so the checkout is not root-owned
+    su $SUDO_USER -c "git clone ${GIT_URL} ${BASEFOLDER}" || ! echo "Error cannot clone from '$GIT_URL'" >&2 || exit 1
+    # Select the refspec: -b wins; otherwise the latest stable tag, unless
+    # --develop keeps master
+    if [[ -n $COMMIT_ID ]] ; then
+        echo -e "Installing osm-RO from refspec: $COMMIT_ID"
+        su $SUDO_USER -c "git -C ${BASEFOLDER} checkout $COMMIT_ID" ||
+            ! echo "Error cannot checkout '$COMMIT_ID' from '$GIT_URL'" >&2 || exit 1
+    elif [[ -z $DEVELOP ]]; then
+        LATEST_STABLE_TAG=`git -C "${BASEFOLDER}" tag -l "v[0-9]*" | sort -V | tail -n1`
+        echo -e "Installing osm-RO from refspec: tags/${LATEST_STABLE_TAG}"
+        su $SUDO_USER -c "git -C ${BASEFOLDER} checkout tags/${LATEST_STABLE_TAG}" ||
+            ! echo "Error cannot checkout 'tags/${LATEST_STABLE_TAG}' from '$GIT_URL'" >&2 || exit 1
+    else
+        echo -e "Installing osm-RO from refspec: master"
+    fi
+    su $SUDO_USER -c "cp ${BASEFOLDER}/.gitignore-common ${BASEFOLDER}/.gitignore"
+fi
+
+echo -e "\n"\
+    "#################################################################\n"\
+    "#####        INSTALLING OSM-IM LIBRARY                      #####\n"\
+    "#################################################################"
+    ${BASEFOLDER}/scripts/install-osm-im.sh
+    # install-osm-im.sh installs osm_im with 'python3 -m pip', so the sanity
+    # check must use python3 too. BUGFIX: 'print x' is python2-only syntax
+    # (SyntaxError on python3); use the print() function form.
+    OSM_IM_PATH=`python3 -c 'import osm_im; print(osm_im.__path__[0])'` ||
+        ! echo "ERROR installing python-osm-im library!!!" >&2  || exit 1
+
+echo -e "\n"\
+    "#################################################################\n"\
+    "#####        INSTALLING OVIM LIBRARY                        #####\n"\
+    "#################################################################"
+    ${BASEFOLDER}/scripts/install-lib-osm-openvim.sh
+    # print() with a single value is valid on both python2 and python3, so
+    # this check works whichever interpreter 'python' resolves to
+    OSMLIBOVIM_PATH=`python -c 'import lib_osm_openvim; print(lib_osm_openvim.__path__[0])'` ||
+        ! echo "ERROR installing python-lib-osm-openvim library!!!" >&2  || exit 1
+
+if [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ]
+then
+    echo -e "\n"\
+        "#################################################################\n"\
+        "#####        CONFIGURE firewalld                            #####\n"\
+        "#################################################################"
+    # BUGFIX: the condition was '[[ -z $QUIET_MODE ]] || ask_user ...', which
+    # configured the firewall WITHOUT asking on interactive runs and prompted
+    # on --quiet (unattended) runs. Intended behavior: in quiet mode assume
+    # yes, otherwise ask the user (default yes, per the "(Y/n)" prompt).
+    if [[ -n $QUIET_MODE ]] || ask_user "Configure firewalld for openmanod port 9090 (Y/n)? " y
+    then
+        #Creates a service file for openmano
+        echo '<?xml version="1.0" encoding="utf-8"?>
+<service>
+ <short>openmanod</short>
+ <description>openmanod service</description>
+ <port protocol="tcp" port="9090"/>
+</service>' > /etc/firewalld/services/openmanod.xml
+        #put proper permissions
+        pushd /etc/firewalld/services > /dev/null
+        restorecon openmanod.xml
+        chmod 640 openmanod.xml
+        popd > /dev/null
+        #Add the openmanod service to the default zone permanently and reload the firewall configuration
+        firewall-cmd --permanent --add-service=openmanod > /dev/null
+        firewall-cmd --reload > /dev/null
+        echo "done."
+    else
+        echo "skipping."
+    fi
+fi
+
+echo -e "\n"\
+    "#################################################################\n"\
+    "#####        CONFIGURE OPENMANO CLIENT                      #####\n"\
+    "#################################################################"
+#creates a link at ~/bin if not configured as a service
+if [[ -z "$INSTALL_AS_A_SERVICE" ]]
+then
+    # single quotes keep ${HOME} to be expanded by the target user's shell
+    # inside 'su', not by this (root) shell
+    su $SUDO_USER -c 'mkdir -p ${HOME}/bin'
+    su $SUDO_USER -c 'rm -f ${HOME}/bin/openmano'
+    su $SUDO_USER -c 'rm -f ${HOME}/bin/openmano-report'
+    su $SUDO_USER -c 'rm -f ${HOME}/bin/service-openmano'
+    su $SUDO_USER -c "ln -s '${BASEFOLDER}/openmano' "'${HOME}/bin/openmano'
+    su $SUDO_USER -c "ln -s '${BASEFOLDER}/scripts/openmano-report.sh'   "'${HOME}/bin/openmano-report'
+    su $SUDO_USER -c "ln -s '${BASEFOLDER}/scripts/service-openmano'  "'${HOME}/bin/service-openmano'
+
+    #insert /home/<user>/bin in the PATH
+    #skiped because normally this is done authomatically when ~/bin exists
+    #if ! su $SUDO_USER -c 'echo $PATH' | grep -q "${HOME}/bin"
+    #then
+    #    echo "    inserting /home/$SUDO_USER/bin in the PATH at .bashrc"
+    #    su $SUDO_USER -c 'echo "PATH=\$PATH:\${HOME}/bin" >> ~/.bashrc'
+    #fi
+
+    # for root, ~/bin is not added to PATH automatically; add it explicitly
+    if [[ $SUDO_USER == root ]]
+    then
+        if ! echo $PATH | grep -q "${HOME}/bin"
+        then
+            echo "PATH=\$PATH:\${HOME}/bin" >> ${HOME}/.bashrc
+        fi
+    fi
+fi
+
+#configure arg-autocomplete for this user
+#in case of minimal instalation this package is not installed by default
+[[ "$_DISTRO" == "CentOS" || "$_DISTRO" == "Red" ]] && yum install -y bash-completion
+#su $SUDO_USER -c 'mkdir -p ~/.bash_completion.d'
+su $SUDO_USER -c 'activate-global-python-argcomplete --user'
+# ensure the argcomplete hook gets sourced for future interactive shells
+if ! su  $SUDO_USER -c 'grep -q bash_completion.d/python-argcomplete.sh ${HOME}/.bashrc'
+then
+    echo "    inserting .bash_completion.d/python-argcomplete.sh execution at .bashrc"
+    su $SUDO_USER -c 'echo ". ${HOME}/.bash_completion.d/python-argcomplete.sh" >> ~/.bashrc'
+fi
+
+if [ -z "$NO_DB" ]; then
+    echo -e "\n"\
+        "#################################################################\n"\
+        "#####               INSTALL DATABASE SERVER                 #####\n"\
+        "#################################################################"
+
+    if [ -n "$QUIET_MODE" ]; then
+        DB_QUIET='-q'
+    fi
+    # ${DBPASSWD_PARAM/p/P} turns the mysql-style '-pPASS' into the '-PPASS'
+    # form taken by install-db-server.sh
+    ${BASEFOLDER}/database_utils/install-db-server.sh -U $DBUSER ${DBPASSWD_PARAM/p/P} $DB_QUIET $DB_FORCE_UPDATE || exit 1
+    echo -e "\n"\
+        "#################################################################\n"\
+        "#####        CREATE AND INIT MANO_VIM DATABASE              #####\n"\
+        "#################################################################"
+    # Install mano_vim_db after setup
+    ${OSMLIBOVIM_PATH}/database_utils/install-db-server.sh -U $DBUSER ${DBPASSWD_PARAM/p/P} -u mano -p manopw -d mano_vim_db --no-install-packages $DB_QUIET $DB_FORCE_UPDATE || exit 1
+fi   # [ -z "$NO_DB" ]
+
+if [[ -n "$INSTALL_AS_A_SERVICE"  ]]
+then
+    echo -e "\n"\
+        "#################################################################\n"\
+        "#####        CONFIGURE OPENMANO SERVICE                     #####\n"\
+        "#################################################################"
+
+    # -d is passed only when we cloned a temporary folder above; presumably it
+    # tells the service installer to delete that folder — TODO confirm against
+    # install-openmano-service.sh
+    ${BASEFOLDER}/scripts/install-openmano-service.sh -f ${BASEFOLDER} `[[ -z "$NOCLONE" ]] && echo "-d"`
+    # rm -rf ${BASEFOLDER}
+    # alias service-openmano="service openmano"
+    # echo 'alias service-openmano="service openmano"' >> ${HOME}/.bashrc
+    echo
+    echo "Done!  installed at /opt/openmano"
+    echo " Manage server with 'sudo -E service osm-ro start|stop|status|...' "
+else
+    echo
+    echo "Done!  you may need to logout and login again for loading client configuration"
+    echo " Run './${BASEFOLDER}/scripts/service-openmano start' for starting openmano in a screen"
+fi
diff --git a/RO/osm_ro/scripts/install-osm-im.sh b/RO/osm_ro/scripts/install-osm-im.sh
new file mode 100755 (executable)
index 0000000..8f733ce
--- /dev/null
@@ -0,0 +1,111 @@
+#!/bin/bash
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+##
+
+# author: Alfonso Tierno
+
+# It uses following env, if not provided filling by default
+[ -z "$GIT_OSMIM_URL" ] && GIT_OSMIM_URL=https://osm.etsi.org/gerrit/osm/IM.git
+[ -z "$DEVELOP" ] && DEVELOP=""
+# folder where RO is installed
+[ -z "$BASEFOLDER" ] && HERE=$(dirname $(readlink -f ${BASH_SOURCE[0]})) && BASEFOLDER=$(dirname $(dirname $HERE))
+# when not run through sudo, operate as the current user
+[ -z "$SUDO_USER" ] && SUDO_USER="$USER"
+[ -z "$NO_PACKAGES" ] && NO_PACKAGES=""
+[ -z "$_DISTRO" ] && _DISTRO="Ubuntu"
+
+function usage(){
+    echo -e "usage: sudo -E $0 [OPTIONS]"
+    echo -e "Install last stable source code of osm-im and the needed packages"
+    echo -e "  OPTIONS"
+    echo -e "     -h --help:  show this help"
+    echo -e "     -b REFSPEC: install from source code using a specific branch (master, v2.0, ...) or tag"
+    echo -e "                    -b master          (main branch)"
+    echo -e "                    -b v2.0            (v2.0 branch)"
+    echo -e "                    -b tags/v1.1.0     (a specific tag)"
+    echo -e "                    ..."
+    echo -e "     --develop:  install last master version for developers"
+    echo -e "     --no-install-packages: use this option to skip updating and installing the requires packages. This" \
+            "avoid wasting time if you are sure requires packages are present e.g. because of a previous installation"
+}
+# Parse short options; long options arrive through the '-' case with the long
+# name in OPTARG
+while getopts ":b:h-:" o; do
+    case "${o}" in
+        b)
+            export COMMIT_ID=${OPTARG}
+            ;;
+        h)
+            usage && exit 0
+            ;;
+        -)
+            [ "${OPTARG}" == "help" ] && usage && exit 0
+            [ "${OPTARG}" == "develop" ] && export DEVELOP="y" && continue
+            [ "${OPTARG}" == "quiet" ] && export QUIET_MODE=yes && export DEBIAN_FRONTEND=noninteractive && continue
+            [ "${OPTARG}" == "no-install-packages" ] && export NO_PACKAGES=yes && continue
+            echo -e "Invalid option: '--$OPTARG'\nTry $0 --help for more information" >&2
+            exit 1
+            ;;
+        \?)
+            echo -e "Invalid option: '-$OPTARG'\nTry $0 --help for more information" >&2
+            exit 1
+            ;;
+        :)
+            echo -e "Option '-$OPTARG' requires an argument\nTry $0 --help for more information" >&2
+            exit 1
+            ;;
+        *)
+            usage >&2
+            exit 1
+            ;;
+    esac
+done
+
+# Clone IM inside the RO checkout and select the refspec: -b wins; otherwise
+# the latest stable tag, unless --develop keeps master
+su $SUDO_USER -c "git -C ${BASEFOLDER} clone ${GIT_OSMIM_URL} IM" ||
+    ! echo "Error cannot clone from '${GIT_OSMIM_URL}'" >&2 || exit 1
+if [[ -n $COMMIT_ID ]] ; then
+    echo -e "Installing osm-IM from refspec: $COMMIT_ID"
+    su $SUDO_USER -c "git -C ${BASEFOLDER}/IM checkout $COMMIT_ID" ||
+        ! echo "Error cannot checkout '$COMMIT_ID' from '${GIT_OSMIM_URL}'" >&2 || exit 1
+elif [[ -z $DEVELOP ]]; then
+    LATEST_STABLE_TAG=`git -C "${BASEFOLDER}/IM" tag -l "v[0-9]*" | sort -V | tail -n1`
+    echo -e "Installing osm-IM from refspec: tags/${LATEST_STABLE_TAG}"
+    su $SUDO_USER -c "git -C ${BASEFOLDER}/IM checkout tags/${LATEST_STABLE_TAG}" ||
+        ! echo "Error cannot checkout 'tags/${LATEST_STABLE_TAG}' from '${GIT_OSMIM_URL}'" >&2 || exit 1
+else
+    echo -e "Installing osm-IM from refspec: master"
+fi
+
+# Install debian dependencies before setup.py
+if [[ -z "$NO_PACKAGES" ]]
+then
+    # apt-get update
+    # apt-get install -y git python-pip
+    # pip2 install pip==9.0.3
+    # pyangbind is needed to generate the python bindings from the yang models
+    python3 -m pip install pyangbind || exit 1
+fi
+
+# Locate pyangbind's pyang plugin folder to drive the code generation below
+PYBINDPLUGIN=$(python3 -c 'import pyangbind; import os; print(os.path.dirname(pyangbind.__file__)+"/plugin")')
+su $SUDO_USER -c 'mkdir -p "'${BASEFOLDER}/IM/osm_im'"'
+su $SUDO_USER -c 'touch "'${BASEFOLDER}/IM/osm_im/__init__.py'"'
+# wget -q https://raw.githubusercontent.com/RIFTIO/RIFT.ware/RIFT.ware-4.4.1/modules/core/util/yangtools/yang/rw-pb-ext.yang -O "${BASEFOLDER}/IM/models/yang/rw-pb-ext.yang"
+# Generate osm_im/vnfd.py and osm_im/nsd.py from the corresponding yang models
+for target in vnfd nsd ; do
+    pyang -Werror --path "${BASEFOLDER}/IM/models/yang" --plugindir "${PYBINDPLUGIN}" -f pybind \
+        -o "${BASEFOLDER}/IM/osm_im/${target}.py" "${BASEFOLDER}/IM/models/yang/${target}.yang"
+done
+
+# Editable install of the generated osm_im package for python3
+python3 -m pip install -e "${BASEFOLDER}/IM" || ! echo "ERROR installing python-osm-im library!!!" >&2  || exit 1
diff --git a/RO/osm_ro/scripts/openmano-report b/RO/osm_ro/scripts/openmano-report
new file mode 100755 (executable)
index 0000000..f2180af
--- /dev/null
@@ -0,0 +1,124 @@
+#!/bin/bash
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+#It generates a report for debugging
+
+DIRNAME=$(readlink -f ${BASH_SOURCE[0]})
+DIRNAME=$(dirname $DIRNAME )
+OMCLIENT=openmano
+
+#get screen log files at the beginning
+echo
+echo "-------------------------------"
+echo "log files"
+echo "-------------------------------"
+echo "-------------------------------"
+echo "OPENMANO"
+echo "-------------------------------"
+echo "cat /var/log/osm/openmano.log*"
+cat /var/log/osm/openmano.log*
+echo
+echo "-------------------------------"
+echo
+
+#get version
+echo
+echo "-------------------------------"
+echo "version"
+echo "-------------------------------"
+echo "-------------------------------"
+echo "OPENMANO"
+echo "-------------------------------"
+echo "openmanod --version"
+openmanod --version
+echo
+echo "-------------------------------"
+echo
+
+#get configuration files
+echo "-------------------------------"
+echo "Configuration files"
+echo "-------------------------------"
+echo "-------------------------------"
+echo "OPENMANO"
+echo "-------------------------------"
+echo "cat /etc/osm/openmanod.cfg"
+cat /etc/osm/openmanod.cfg
+echo "-------------------------------"
+echo
+
+#get list of items
+for verbose in "" "-vvv"
+do
+  echo "-------------------------------"
+  echo "OPENMANO$verbose"
+  echo "-------------------------------"
+  echo "$OMCLIENT config $verbose"
+  $OMCLIENT config
+  echo "-------------------------------"
+  echo "$OMCLIENT tenant-list $verbose"
+  $OMCLIENT tenant-list $verbose
+  echo "-------------------------------"
+  echo "$OMCLIENT datacenter-list --all"
+  $OMCLIENT datacenter-list --all
+  echo "-------------------------------"
+  echo "$OMCLIENT datacenter-list $verbose"
+  $OMCLIENT datacenter-list $verbose
+  echo "-------------------------------"
+  dclist=`$OMCLIENT datacenter-list |awk '{print $1}'`
+  for dc in $dclist; do
+    echo "$OMCLIENT datacenter-net-list $dc $verbose"
+    $OMCLIENT datacenter-net-list $dc $verbose
+    echo "-------------------------------"
+  done
+  echo "$OMCLIENT vnf-list $verbose"
+  $OMCLIENT vnf-list $verbose
+  echo "-------------------------------"
+  vnflist=`$OMCLIENT vnf-list |awk '$1!="No" {print $1}'`
+  for vnf in $vnflist; do
+    echo "$OMCLIENT vnf-list $vnf $verbose"
+    $OMCLIENT vnf-list $vnf $verbose
+    echo "-------------------------------"
+  done
+  echo "$OMCLIENT scenario-list $verbose"
+  $OMCLIENT scenario-list $verbose
+  echo "-------------------------------"
+  scenariolist=`$OMCLIENT scenario-list |awk '$1!="No" {print $1}'`
+  for sce in $scenariolist; do
+    echo "$OMCLIENT scenario-list $sce $verbose"
+    $OMCLIENT scenario-list $sce $verbose
+    echo "-------------------------------"
+  done
+  echo "$OMCLIENT instance-scenario-list $verbose"
+  $OMCLIENT instance-scenario-list $verbose
+  echo "-------------------------------"
+  instancelist=`$OMCLIENT instance-scenario-list |awk '$1!="No" {print $1}'`
+  for i in $instancelist; do
+    echo "$OMCLIENT instance-scenario-list $i $verbose"
+    $OMCLIENT instance-scenario-list $i $verbose
+    echo "-------------------------------"
+  done
+  echo
+
+done
+echo
diff --git a/RO/osm_ro/scripts/service-openmano b/RO/osm_ro/scripts/service-openmano
new file mode 100755 (executable)
index 0000000..a44743d
--- /dev/null
@@ -0,0 +1,212 @@
+#!/bin/bash
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+
+#launch openmano inside a screen.
+#or call service if it is installed on systemd
+
+
# Resolve the directory holding this script (following symlinks) and its
# parent, which is taken as the openmano base directory.
DIRNAME=$(readlink -f ${BASH_SOURCE[0]})
DIRNAME=$(dirname $DIRNAME )
DIR_OM=$(dirname $DIRNAME )
+
function usage(){
    # Print command line help for this launcher script.
    cat << EOF
Usage: $0 [openmano/mano] start|stop|restart|status
  Launch|Removes|Restart|Getstatus openmano on a screen/service
    -n --screen-name NAME : name of screen to launch openmano (default mano or service)
    -h --help: shows this help
    -- PARAMS use to separate PARAMS that will be send to the service. e.g. -pPORT -PADMINPORT --dbname=DDBB
EOF
}
+
+
function kill_pid(){
    # Gracefully terminate a process: send SIGTERM, poll for up to 5 seconds,
    # and escalate to SIGKILL if the process is still alive when the wait runs out.
    # PARAMS: $1: PID of process to terminate
    kill $1 #send TERM signal
    WAIT=5
    # poll once per second while the PID is still listed among this user's processes
    while [ $WAIT -gt 0 ] && ps -o pid -U $USER -u $USER | grep -q $1
    do
        sleep 1
        WAIT=$((WAIT-1))
        [ $WAIT -eq 0 ] && echo -n "sending SIGKILL...  " &&  kill -9 $1  #kill when count reach 0
    done
    echo "done"
}
+
#process command line options; get-options.sh fills the option_* variables and $params
source ${DIRNAME}/get-options.sh "screen-name:n= help:h --" $* || exit 1

#help
[ -n "$option_help" ] && usage && exit 0


#obtain parameters
om_list=""
#om_action="start"  #uncoment to get a default action
action_list=""
om_params="$option__"

# classify each positional parameter as either the action or a component name
for param in $params
do
    [ "$param" == "start" -o "$param" == "stop"  -o "$param" == "restart" -o "$param" == "status" ] && om_action=$param  && continue
    [ "$param" == "openmano" -o "$param" == "mano" ]   && om_list="$om_list mano"             && continue
    #short options
    echo "invalid argument '$param'?  Type -h for help" >&2 && exit 1
done

[[ -n $option_screen_name ]] && option_screen_name=${option_screen_name#*.} #allow the format 'pid.name' and keep only name
#check action is provided
[ -z "$om_action" ] && usage >&2 && exit -1

#if no components supplied assume all
[ -z "$om_list" ] && om_list="mano"
function find_process_id(){ #PARAMS:  command screen-name
    # Print (space separated) the PIDs of this user's processes whose command
    # line matches $1. When a screen name is supplied in $2, keep only the
    # processes whose STY environment variable (read via 'ps wwep') matches it.
    for process_id in `ps -o pid,cmd -U $USER -u $USER | grep -v grep | grep "${1}" | awk '{print $1}'`
    do
        scname=$(ps wwep $process_id | grep -o 'STY=\S*')
        scname=${scname#STY=}
        [[ -n "$2" ]] && [[ "${scname#*.}" != "$2" ]] && continue
        echo -n "${process_id} "
    done
    echo
}
+
# Main loop: perform the requested action on every selected component
# (currently the only component is "mano", i.e. ./openmanod)
for om_component in $om_list
do
    screen_name="${om_component}"
    [[ -n "$option_screen_name" ]] && screen_name=$option_screen_name
    [ "${om_component}" == "mano" ] && om_cmd="./openmanod"   && om_name="openmano  " && om_dir=$(readlink -f ${DIR_OM})
    #obtain PID of program
    component_id=`find_process_id "${om_cmd}" $option_screen_name`
    processes=$(echo $component_id | wc -w)

    #status: report pid/screen/cmd of every matching process
    if [ "$om_action" == "status" ]
    then
        running=""
        for process_id in $component_id
        do
            scname=$(ps wwep $process_id | grep -o 'STY=\S*')
            scname=${scname#STY=}
            [[ -n "$option_screen_name" ]] && [[ "${scname#*.}" != "$option_screen_name" ]] && continue
            printf "%-15s" "pid: ${process_id},"
            [[ -n "$scname" ]] && printf "%-25s" "screen: ${scname},"
            echo cmd: $(ps -o cmd p $process_id | tail -n1 )
            running=y
        done
        #if installed as a service and it is not provided a screen name call service
        [[ -f /etc/systemd/system/osm-ro.service ]] && [[ -z $option_screen_name ]] && running=y #&& service osm-ro status
        if [ -z "$running" ]
        then
            echo -n "    $om_name not running" && [[ -n "$option_screen_name" ]] && echo " on screen '$option_screen_name'" || echo
        fi
    fi

    #if installed as a service and it is not provided a screen name, delegate to systemd and exit
    [[ -f /etc/systemd/system/osm-ro.service ]] && [[ -z $option_screen_name ]] && service osm-ro $om_action && ( [[ $om_action == status ]] || sleep 5 ) && exit $?


    #stop: terminate the process (TERM then KILL) and close its screen session
    if [ "$om_action" == "stop" -o "$om_action" == "restart" ]
    then
        #terminates program
        [ $processes -gt 1 ] && echo "$processes processes are running, specify with --screen-name" && continue
        [ $processes -eq 1 ] && echo -n "    stopping $om_name ... " && kill_pid $component_id
        component_id=""
        #terminates screen
        if screen -wipe | grep -q -e  "\.${screen_name}\b"
        then
            screen -S $screen_name -p 0 -X stuff "exit\n" || echo
            sleep 1
        fi
    fi

    #start: launch the daemon inside a (new or reused) screen session with logging
    if [ "$om_action" == "start" -o "$om_action" == "restart" ]
    then
        #calculates log file name
        logfile=""
        mkdir -p $DIR_OM/logs && logfile=$DIR_OM/logs/open${screen_name}.log || echo "can not create logs directory  $DIR_OM/logs"
        #check already running
        [ -n "$component_id" ] && echo "    $om_name is already running. Skipping" && continue
        #create screen if not created
        echo -n "    starting $om_name ... "
        if ! screen -wipe | grep -q -e "\.${screen_name}\b"
        then
            pushd ${om_dir} > /dev/null
            screen -dmS ${screen_name}  bash
            sleep 1
            popd > /dev/null
        else
            echo -n " using existing screen '${screen_name}' ... "
            screen -S ${screen_name} -p 0 -X log off
            screen -S ${screen_name} -p 0 -X stuff "cd ${om_dir}\n"
            sleep 1
        fi
        #move old log file index one number up and log again in index 0 (keeps up to 9 rotations)
        if [[ -n $logfile ]]
        then
            for index in 8 7 6 5 4 3 2 1
            do
                [[ -f ${logfile}.${index} ]] && mv ${logfile}.${index} ${logfile}.$((index+1))
            done
            [[ -f ${logfile} ]] && mv ${logfile} ${logfile}.1
            screen -S ${screen_name} -p 0 -X logfile ${logfile}
            screen -S ${screen_name} -p 0 -X log on
        fi
        #launch command to screen
        screen -S ${screen_name} -p 0 -X stuff "${om_cmd}${om_params}\n"
        #wait until the daemon reports ready in the log (or the timeout expires)
        [[ -n $logfile ]] && timeout=120 #2 minute
        [[ -z $logfile ]] && timeout=20
        while [[ $timeout -gt 0 ]]
        do
           #check if is running
           #echo timeout $timeout
           #if !  ps -f -U $USER -u $USER | grep -v grep | grep -q ${om_cmd}
           log_lines=0
           [[ -n $logfile ]] && log_lines=`head ${logfile} | wc -l`
           component_id=`find_process_id "${om_cmd}${om_params}" $screen_name`
           if [[ -z $component_id ]]
           then #process not started or finished
               [[ $log_lines -ge 2 ]] &&  echo -n "ERROR, it has exited." && break
               #started because it wrote several lines at log, so report error
           fi
           [[ -n $logfile ]] && grep -q "open${om_component}d ready" ${logfile} && break
           sleep 1
           timeout=$((timeout -1))
        done
        if [[ -n $logfile ]] && [[ $timeout == 0 ]]
        then
           echo -n "timeout!"
        else
           echo -n "running on 'screen -x ${screen_name}'."
        fi
        [[ -n $logfile ]] && echo "  Logging at '${logfile}'" || echo
    fi
done
+
+
+
+
diff --git a/RO/osm_ro/tests/__init__.py b/RO/osm_ro/tests/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/RO/osm_ro/tests/db_helpers.py b/RO/osm_ro/tests/db_helpers.py
new file mode 100644 (file)
index 0000000..bedf9a5
--- /dev/null
@@ -0,0 +1,187 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+import hashlib
+import shlex
+import unittest
+from contextlib import contextmanager
+from functools import wraps
+from hashlib import md5
+from os import environ, pathsep
+from subprocess import STDOUT, check_output
+from uuid import UUID
+
+from MySQLdb import connect
+
+from ..nfvo_db import nfvo_db
+
+HOST = environ.get('TEST_DB_HOST', 'localhost')
+USER = environ.get('TEST_DB_USER', 'mano')
+PASSWORD = environ.get('TEST_DB_PASSWORD', 'manopw')
+DATABASE = environ.get('TEST_DB_DATABASE', 'mano_db')
+
+
def uuid(seed):
    """Generate a string with UUID format in a repeatable way.

    The seed is stringified, hashed with MD5, and the 32-hex-digit digest
    is rendered as a UUID, so the same seed always produces the same value.

    :param seed: any object whose str() representation identifies it
    :return: UUID-formatted str, e.g. 'c4ca4238-a0b9-2382-0dcc-509a6f75849b'
    """
    # .encode() is required on python3: hashlib digests operate on bytes
    return str(UUID(md5(str(seed).encode("utf-8")).hexdigest()))
+
+
def sha1(text):
    """Generate a SHA1 hex digest from a text (or bytes) string.

    :param text: str or bytes to hash; str is UTF-8 encoded first, since
        python3 hashlib only accepts bytes
    :return: 40-character hexadecimal digest string
    """
    if isinstance(text, str):
        text = text.encode("utf-8")
    return hashlib.sha1(text).hexdigest()
+
+
def run(*args, **kwargs):
    """Execute a command in a subprocess and return its captured output.

    A ``CalledProcessError`` is raised when the command exits non-zero.

    Arguments:
        *args: either one string holding the whole command line (tokenized
            with ``shlex.split``) or the command words as separate arguments
        **kwargs: forwarded to ``subprocess.check_output``; defaults are
            ``stderr=STDOUT`` and ``universal_newlines=True``
    """
    command = args
    if len(command) == 1 and isinstance(command[0], str):
        command = shlex.split(command[0])

    options = {"stderr": STDOUT, "universal_newlines": True}
    options.update(kwargs)
    return check_output(command, **options)
+
+
# In order to not mess around, enforce user to explicit set the
# test database in a env variable
@unittest.skipUnless(
    environ.get('TEST_DB_HOST'),
    'Test database not available. Please set TEST_DB_HOST env var')
class TestCaseWithDatabase(unittest.TestCase):
    """Connect to the database and provide methods to facilitate isolating the
    database stored inside it between tests.

    In order to avoid connecting, reconnecting, creating tables and destroying
    tables all the time, this class manage the database using class-level
    fixtures. This reduce the cost of performing these actions but not
    guarantees isolation in the DB state between the tests.
    To enforce isolation, please call the ``setup_tables`` and
    ``empty_database`` directly, or write one single test per class.
    """

    # Connection parameters, taken from the TEST_DB_* environment variables
    # (see the module-level defaults above)
    host = HOST
    user = USER
    password = PASSWORD
    database = DATABASE

    @classmethod
    def setup_tables(cls):
        """Make sure the database is set up and in the right version, with all the
        required tables.
        """
        # init_mano_db.sh must be reachable through $PATH; the DBUTILS env var
        # may point at the directory that contains it
        dbutils = environ.get('DBUTILS')

        if dbutils:
            environ["PATH"] += pathsep + dbutils

        return run('init_mano_db.sh',
                   '-u', cls.user,
                   '-p', cls.password,
                   '-h', cls.host,
                   '-d', cls.database)

    @classmethod
    def empty_database(cls):
        """Clear the database, so one test does not interfere with the other"""
        # Create a custom connection not attached to the database, so we can
        # destroy and recreate the database itself
        connection = connect(cls.host, cls.user, cls.password)
        cursor = connection.cursor()
        # NOTE(review): on python3 MySQLdb's escape_string returns bytes, so
        # str.format may embed a b'...' literal in the statement -- confirm
        cursor.execute(
            "DROP DATABASE {};".format(
                connection.escape_string(cls.database)))
        cursor.execute(
            "CREATE DATABASE {};".format(
                connection.escape_string(cls.database)))
        cursor.close()
        connection.close()
+
+
class TestCaseWithDatabasePerTest(TestCaseWithDatabase):
    """Ensure a connection to the database before and
    drop tables after each test runs
    """

    def setUp(self):
        # Recreate the schema for every test and schedule a full wipe afterwards
        self.setup_tables()
        self.addCleanup(self.empty_database)

        self.maxDiff = None

        self.db = nfvo_db(self.host, self.user, self.password, self.database)
        self.db.connect()

    def populate(self, seeds=None, **kwargs):
        """Seed the database with initial values.

        ``seeds`` may be a single row dict or a list of them; any extra
        keyword arguments are appended as one more row.
        """
        if not seeds:
            seeds = []
        if not isinstance(seeds, (list, tuple)):
            seeds = [seeds]
        if kwargs:
            seeds.append(kwargs)
        self.db.new_rows(seeds)

    def count(self, table):
        """Count number of rows in a table"""
        return self.db.get_rows(
            SELECT='COUNT(*) as count', FROM=table)[0]['count']

    @contextmanager
    def disable_foreign_keys(self):
        """Context manager that runs the enclosed block without foreign key checks"""
        try:
            cursor = self.db.con.cursor()
            cursor.execute('SET FOREIGN_KEY_CHECKS=0;')
            yield
        finally:
            # always restore the checks, even if the block raised
            cursor.execute('SET FOREIGN_KEY_CHECKS=1;')
+
+
def disable_foreign_keys(test):
    """Decorator that runs *test* with foreign key checks disabled.

    To be used together in subclasses of TestCaseWithDatabasePerTest
    (it relies on the instance's ``disable_foreign_keys`` context manager).
    """
    @wraps(test)
    def _wrapper(self, *args, **kwargs):
        with self.disable_foreign_keys():
            return test(self, *args, **kwargs)

    return _wrapper
diff --git a/RO/osm_ro/tests/helpers.py b/RO/osm_ro/tests/helpers.py
new file mode 100644 (file)
index 0000000..011e880
--- /dev/null
@@ -0,0 +1,121 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+
+import logging
+import unittest
+from collections import defaultdict
+
+from io import StringIO
+
+from unittest.mock import MagicMock, patch
+
+logger = logging.getLogger()
+
+
class TestCaseWithLogging(unittest.TestCase):
    """Capture the log records emitted while a test runs.

    On ``setUp`` a stream handler writing to an in-memory buffer is attached
    to the root logger; ``tearDown`` detaches it again.  The captured text
    can be inspected with::

        self.caplog.getvalue()
    """
    def setUp(self):
        super().setUp()
        self.caplog = StringIO()
        self.log_handler = logging.StreamHandler(self.caplog)
        self.logger = logging.getLogger()
        self.logger.setLevel(logging.NOTSET)
        self.logger.addHandler(self.log_handler)

    def tearDown(self):
        super().tearDown()
        self.log_handler.close()
        self.logger.removeHandler(self.log_handler)
+
+
def mock_imports(modules, preserve=()):
    """Patch ``sys.modules`` so the given modules are replaced by mocks.

    Every ancestor package of each listed module is mocked as well (mocking
    ``Crypto.PublicKey`` also mocks ``Crypto``), except for names listed in
    *preserve*.  Returns a ``unittest.mock.patch.dict`` patcher, usable as a
    context manager or decorator.
    """
    # Ensure both arguments are iterables of names
    if isinstance(modules, str):
        modules = (modules,)
    if isinstance(preserve, str):
        preserve = (preserve,)

    # Expand 'a.b.c' into {'a', 'a.b', 'a.b.c'} so parents are mocked too
    expanded = set()
    for name in modules:
        parts = name.split('.')
        for end in range(1, len(parts) + 1):
            expanded.add('.'.join(parts[:end]))

    expanded.difference_update(preserve)
    for module in expanded:
        logging.getLogger().info('Mocking module `%s`', module)

    mocks = {module: MagicMock() for module in expanded}

    return patch.dict('sys.modules', **mocks)
+
+
def mock_dict(**kwargs):
    """Create a dict that answers every missing key with a MagicMock.

    Arguments:
        **kwargs: items whose values should be fixed in the created dict
    """
    result = defaultdict(MagicMock)
    result.update(kwargs)

    return result
+
+
def mock_object(**kwargs):
    """Create an object that answers any attribute access.

    Arguments:
        **kwargs: attributes whose values should be preset on the object
    """
    stub = MagicMock()
    for name, value in kwargs.items():
        setattr(stub, name, value)

    return stub
diff --git a/RO/osm_ro/tests/test_db.py b/RO/osm_ro/tests/test_db.py
new file mode 100644 (file)
index 0000000..5e90bd9
--- /dev/null
@@ -0,0 +1,121 @@
+# -*- coding: utf-8 -*-
+# pylint: disable=E1101
+import unittest
+
+from MySQLdb import connect, cursors, DatabaseError, IntegrityError
+
+from ..db_base import retry, with_transaction
+from ..nfvo_db import nfvo_db
+from .db_helpers import TestCaseWithDatabase
+
+
class TestDbDecorators(TestCaseWithDatabase):
    """Exercise the ``retry`` and transaction decorators of ``db_base``
    against a real MySQL test database (selected through the TEST_DB_*
    environment variables, see TestCaseWithDatabase).
    """

    @classmethod
    def setUpClass(cls):
        # Create the test database (if missing) and a minimal table with a
        # primary key, used below to provoke integrity errors.
        connection = connect(cls.host, cls.user, cls.password)
        cursor = connection.cursor()
        # NOTE(review): on python3 MySQLdb's escape_string returns bytes, so
        # str.format may embed a b'...' literal in the statement -- confirm
        cursor.execute(
            "CREATE DATABASE IF NOT EXISTS {};".format(
                connection.escape_string(cls.database)))
        cursor.execute("use {};".format(cls.database))
        cursor.execute("""\
            CREATE TABLE IF NOT EXISTS `test_table` (\
                `id` int(11) NOT NULL,
                PRIMARY KEY (`id`)\
            );\
        """)
        cursor.close()
        connection.close()

    @classmethod
    def tearDownClass(cls):
        # Drop and recreate the database so later test classes start clean
        cls.empty_database()

    def setUp(self):
        self.maxDiff = None
        self.db = nfvo_db(self.host, self.user, self.password, self.database)
        self.db.connect()
        self.addCleanup(lambda: self.db.disconnect())

    def db_run(self, query, cursor=None):
        """Execute *query* on the given (or a fresh) cursor and return the first row."""
        cursor = cursor or self.db.con.cursor()
        cursor.execute(query)
        return cursor.fetchone()

    def test_retry_inject_attempt(self):
        # @retry must inject an 'attempt' keyword whose numbering starts at 1
        @retry
        def _fn(db, attempt=None):
            self.assertIsNotNone(attempt)
            self.assertEqual(attempt.number, 1)

        _fn(self.db)

    def test_retry_accept_max_attempts(self):
        # @retry(max_attempts=N) retries DatabaseError up to the given limit
        success = []
        failures = []

        @retry(max_attempts=5)
        def _fn(db, attempt=None):
            if attempt.count < 4:
                failures.append(attempt.count)
                raise DatabaseError("Emulate DB error", "msg")
            success.append(attempt.count)

        _fn(self.db)
        self.assertEqual(failures, [0, 1, 2, 3])
        self.assertEqual(success, [4])

    def test_retry_reconnect_auctomatically(self):
        # After a closed connection, @retry must reconnect and rerun the function
        success = []
        failures = []

        @retry(max_attempts=3)
        def _fn(db, attempt=None):
            if attempt.count < 2:
                failures.append(attempt.count)
                db.con.close()  # Simulate connection failure
            result = self.db_run('select 1+1, 2+2;')
            success.append(attempt.count)
            return result

        result = _fn(self.db)
        self.assertEqual(failures, [0, 1])
        self.assertEqual(success, [2])
        self.assertEqual(result, (2, 4))

    def test_retry_reraise_non_db_errors(self):
        # Exceptions unrelated to the database must propagate without retrying
        failures = []

        @retry
        def _fn(db, attempt=None):
            failures.append(attempt.count)
            raise SystemError("Non Correlated Error")

        with self.assertRaises(SystemError):
            _fn(self.db)

        self.assertEqual(failures, [0])

    def test_transaction_rollback(self):
        # A failure inside db.transaction() must roll back the whole transaction
        with self.assertRaises(IntegrityError), \
                 self.db.transaction() as cursor:
            # The first row is created normally
            self.db_run('insert into test_table (id) values (1)', cursor)
            # The second row fails due to repeated id
            self.db_run('insert into test_table (id) values (1)', cursor)
            # The entire transaction will rollback then, and therefore the
            # first operation will be undone

        count = self.db_run('select count(*) FROM test_table')
        self.assertEqual(count, (0,))

    def test_transaction_cursor(self):
        # db.transaction() must honour a custom cursor class
        with self.db.transaction(cursors.DictCursor) as cursor:
            count = self.db_run('select count(*) as counter FROM test_table',
                                cursor)

        self.assertEqual(count, {'counter': 0})
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/RO/osm_ro/tests/test_utils.py b/RO/osm_ro/tests/test_utils.py
new file mode 100644 (file)
index 0000000..9fd71cf
--- /dev/null
@@ -0,0 +1,77 @@
+# -*- coding: utf-8 -*-
+# pylint: disable=E1101
+
+import unittest
+
+from ..utils import (
+    get_arg,
+    inject_args,
+    remove_extra_items,
+)
+
+
class TestUtils(unittest.TestCase):
    """Unit tests for the pure helpers in osm_ro.utils
    (inject_args, get_arg and remove_extra_items).
    """

    def test_inject_args_curries_arguments(self):
        fn = inject_args(lambda a=None, b=None: a + b, a=3, b=5)
        self.assertEqual(fn(), 8)

    def test_inject_args_doesnt_add_arg_if_not_needed(self):
        fn = inject_args(lambda: 7, a=1, b=2)
        self.assertEqual(fn(), 7)
        fn = inject_args(lambda a=None: a, b=2)
        self.assertEqual(fn(1), 1)

    def test_inject_args_knows_how_to_handle_arg_order(self):
        fn = inject_args(lambda a=None, b=None: b - a, a=3)
        self.assertEqual(fn(b=4), 1)
        fn = inject_args(lambda b=None, a=None: b - a, a=3)
        self.assertEqual(fn(b=4), 1)

    def test_inject_args_works_as_decorator(self):
        fn = inject_args(x=1)(lambda x=None: x)
        self.assertEqual(fn(), 1)

    def test_get_arg__positional(self):
        # get_arg must resolve arguments passed positionally
        def _fn(x, y, z):
            return x + y + z

        x = get_arg("x", _fn, (1, 3, 4), {})
        self.assertEqual(x, 1)
        y = get_arg("y", _fn, (1, 3, 4), {})
        self.assertEqual(y, 3)
        z = get_arg("z", _fn, (1, 3, 4), {})
        self.assertEqual(z, 4)

    def test_get_arg__keyword(self):
        # get_arg must prefer an explicit keyword over the default value
        def _fn(x, y, z=5):
            return x + y + z

        z = get_arg("z", _fn, (1, 2), {"z": 3})
        self.assertEqual(z, 3)



    def test_remove_extra_items__keep_aditional_properties(self):
        # Keys absent from the schema are removed, unless the enclosing
        # object declares additionalProperties
        schema = {
            "type": "object",
            "properties": {
                "a": {
                    "type": "object",
                    "properties": {
                        "type": "object",
                        "properties": {"b": "string"},
                    },
                    "additionalProperties": True,
                }
            },
        }

        example = {"a": {"b": 1, "c": 2}, "d": 3}
        deleted = remove_extra_items(example, schema)
        self.assertIn("d", deleted)
        self.assertIs(example.get("d"), None)
        self.assertEqual(example["a"]["c"], 2)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/RO/osm_ro/utils.py b/RO/osm_ro/utils.py
new file mode 100644 (file)
index 0000000..ac291c1
--- /dev/null
@@ -0,0 +1,429 @@
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+'''
+utils is a module that implements functions that are used by all openmano modules,
+dealing with aspects such as reading/writing files, formatting inputs/outputs for quick translation
+from dictionaries to appropriate database dictionaries, etc.
+'''
+__author__="Alfonso Tierno, Gerardo Garcia"
+__date__ ="$08-sep-2014 12:21:22$"
+
+import datetime
+import time
+import warnings
+from functools import reduce, partial, wraps
+from itertools import tee
+
+from itertools import filterfalse
+
+from jsonschema import exceptions as js_e
+from jsonschema import validate as js_v
+
+from inspect import getfullargspec as getspec
+
+#from bs4 import BeautifulSoup
+
def read_file(file_to_read):
    """Read a whole file.

    :param file_to_read: path of the file to read
    :return: (True, <content as str>) on success, (False, <error message>)
        on failure
    """
    try:
        # 'with' guarantees the descriptor is closed even if read() raises
        with open(file_to_read, 'r') as f:
            read_data = f.read()
    except Exception as e:
        return (False, str(e))

    return (True, read_data)
+
def write_file(file_to_write, text):
    """Write *text* to the file at *file_to_write* (truncating it).

    :param file_to_write: path of the file to (over)write
    :param text: str content to store
    :return: (True, None) on success, (False, <error message>) on failure
    """
    try:
        # 'with' guarantees the descriptor is closed even if write() raises
        with open(file_to_write, 'w') as f:
            f.write(text)
    except Exception as e:
        return (False, str(e))

    return (True, None)
+
def format_in(http_response, schema):
    """Parse the JSON body of *http_response* and validate it against a jsonschema.

    :param http_response: request/response-like object offering a .json() method
    :param schema: jsonschema dictionary to validate against
    :return: (True, <parsed data>) when the body is valid;
        (False, <error description>) on validation failure
    """
    try:
        client_data = http_response.json()
        js_v(client_data, schema)
        #print "Input data: ", str(client_data)
        return True, client_data
    except js_e.ValidationError as exc:
        # NOTE(review): the error is printed to stdout and the returned
        # "message" is a tuple, not a formatted string -- confirm callers
        # expect this shape
        print("validate_in error, jsonschema exception ", exc.message, "at", exc.path)
        return False, ("validate_in error, jsonschema exception ", exc.message, "at", exc.path)
+
def remove_extra_items(data, schema):
    """Recursively delete from *data* every dict key not declared in *schema*.

    Dicts whose schema sets ``additionalProperties`` keep their unknown keys.

    :param data: list/tuple/dict structure, purged in place
    :param schema: jsonschema describing the expected structure
    :return: None when nothing was removed, the single removed item, or a
        list describing everything that was removed
    """
    deleted = []
    if isinstance(data, dict):
        # TODO deal with patternProperties
        if 'properties' not in schema:
            return None
        extra_keys = []
        for key in list(data.keys()):
            if key in schema['properties']:
                nested = remove_extra_items(data[key], schema['properties'][key])
                if nested:
                    deleted.append({key: nested})
            elif not schema.get('additionalProperties'):
                extra_keys.append(key)
                deleted.append(key)
        for key in extra_keys:
            del data[key]
    elif isinstance(data, (tuple, list)):
        for item in data:
            nested = remove_extra_items(item, schema['items'])
            if nested:
                deleted.append(nested)
    if not deleted:
        return None
    if len(deleted) == 1:
        return deleted[0]
    return deleted
+
+#def format_html2text(http_content):
+#    soup=BeautifulSoup(http_content)
+#    text = soup.p.get_text() + " " + soup.pre.get_text()
+#    return text
+
+
def delete_nulls(var):
    """Recursively remove None values from dicts (including dicts inside lists).

    Dict keys whose value is None, or whose container value becomes empty
    after cleaning, are deleted in place.

    :param var: dict/list/tuple to clean in place
    :return: True when *var* ends up empty (so the caller can drop it),
        False otherwise
    """
    if isinstance(var, dict):
        to_delete = []
        for k in var.keys():
            if var[k] is None:
                # BUG FIX: record the key itself; previously the unhashable
                # list [k] was appended, so 'del var[k]' below raised
                # TypeError whenever a None value was present
                to_delete.append(k)
            elif isinstance(var[k], (dict, list, tuple)):
                if delete_nulls(var[k]):
                    to_delete.append(k)
        for k in to_delete:
            del var[k]
        if len(var) == 0:
            return True
    elif isinstance(var, (list, tuple)):
        for k in var:
            if isinstance(k, dict):
                delete_nulls(k)
        if len(var) == 0:
            return True
    return False
+
+
def convert_bandwidth(data, reverse=False):
    """Find every 'bandwidth' field recursively and normalize it in place.

    Attributes:
        'data': dictionary bottle.FormsDict variable to be checked. None or
            empty is considered valid
        'reverse': by default convert from a str with units ("1 Gbps",
            "500 Mbps", "2000 kbps", "1000bps") to an int number of Mbps;
            if True convert from a number of Mbps to a str with units
    Return:
        None; ill-formed values are reported on stdout and left untouched
    """
    if isinstance(data, dict):
        for k in data.keys():
            if isinstance(data[k], (dict, tuple, list)):
                convert_bandwidth(data[k], reverse)
        if "bandwidth" in data:
            try:
                value = str(data["bandwidth"])
                if not reverse:
                    pos = value.find("bps")
                    if pos > 0:
                        if value[pos-1] == "G":
                            data["bandwidth"] = int(data["bandwidth"][:pos-1]) * 1000
                        elif value[pos-1] == "M":
                            # BUG FIX: "N Mbps" (the format produced by the
                            # reverse conversion) previously fell into the
                            # generic branch and failed with ValueError
                            data["bandwidth"] = int(data["bandwidth"][:pos-1])
                        elif value[pos-1] == "k":
                            data["bandwidth"] = int(data["bandwidth"][:pos-1]) // 1000
                        else:
                            data["bandwidth"] = int(data["bandwidth"][:pos])
                else:
                    value = int(data["bandwidth"])
                    if value % 1000 == 0 and value > 1000:
                        data["bandwidth"] = str(value // 1000) + " Gbps"
                    else:
                        data["bandwidth"] = str(value) + " Mbps"
            except (ValueError, TypeError):
                # narrowed except: only conversion errors are expected here
                print("convert_bandwidth exception for type", type(data["bandwidth"]), " data", data["bandwidth"])
                return
    if isinstance(data, (tuple, list)):
        for k in data:
            if isinstance(k, (dict, tuple, list)):
                convert_bandwidth(k, reverse)
+
def convert_float_timestamp2str(var):
    """Recursively replace float 'created_at'/'modified_at' values by
    '%Y-%m-%dT%H:%M:%S' formatted local-time strings.

    Walks nested dicts/lists/tuples; returns True when a dict is empty.
    """
    if type(var) is dict:
        for key, value in var.items():
            if type(value) is float and key in ("created_at", "modified_at"):
                var[key] = time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime(value))
            elif type(value) in (dict, list, tuple):
                convert_float_timestamp2str(value)
        if len(var) == 0:
            return True
    elif type(var) in (list, tuple):
        for item in var:
            convert_float_timestamp2str(item)
+
def convert_datetime2str(var):
    """Render every datetime.datetime value found in 'var' as a
    '%Y-%m-%dT%H:%M:%S' string, descending recursively into nested containers.
    Modification is done in place; returns True when an empty dict is reached.
    """
    if type(var) is dict:
        for key, val in var.items():
            if type(val) is datetime.datetime:
                var[key] = val.strftime('%Y-%m-%dT%H:%M:%S')
            elif type(val) in (dict, list, tuple):
                convert_datetime2str(val)
        if not var:
            return True
    elif type(var) in (list, tuple):
        for item in var:
            convert_datetime2str(item)
+
def convert_str2boolean(data, items):
    """Recursively walk 'data' and cast the values of any key listed in 'items'
    from the strings "true"/"True"/"false"/"False" to booleans.
    Attributes:
        'data': dictionary variable to be checked. None or empty is considered valid
        'items': tuple of keys whose values must be converted
    Return:
        None (conversion is done in place)
    """
    if type(data) is dict:
        for key in data.keys():
            if type(data[key]) in (dict, tuple, list):
                convert_str2boolean(data[key], items)
            if key in items and type(data[key]) is str:
                if data[key] in ("false", "False"):
                    data[key] = False
                elif data[key] in ("true", "True"):
                    data[key] = True
    if type(data) in (tuple, list):
        for element in data:
            if type(element) in (dict, tuple, list):
                convert_str2boolean(element, items)
+
def check_valid_uuid(uuid):
    """Return True if 'uuid' is a valid UUID string, either in the canonical
    hyphenated 8-4-4-4-12 form or as a plain 32 hex-digit string; False otherwise.
    Non-string input is considered invalid.
    """
    import re
    if not isinstance(uuid, str):
        return False
    # canonical hyphenated form, e.g. "550e8400-e29b-41d4-a716-446655440000"
    if re.match(r"^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$", uuid):
        return True
    # compact form without hyphens
    return bool(re.match(r"^[a-fA-F0-9]{32}$", uuid))
+
+
def expand_brackets(text):
    """
    Expand a text of the form TEXT[ABC...] into the list [TEXTA, TEXTB, TEXTC, ...].
    When no brackets are present, the result is a single-element list with the text.
    Recursion allows several [] groups inside the same text.
    :param text: string to expand (None is tolerated)
    :return: list of expanded strings (a one-element tuple (None,) for None input)
    """
    if text is None:
        return (None, )
    open_pos = text.find("[")
    close_pos = text.find("]")
    if open_pos < 0 or close_pos < 0:
        return [text]
    prefix = text[:open_pos]
    suffix = text[close_pos + 1:]
    expanded = []
    for option in text[open_pos + 1:close_pos]:
        expanded.extend(expand_brackets(prefix + option + suffix))
    return expanded
+
def deprecated(message):
    """Decorator factory flagging a function as deprecated.
    Every call to the decorated function emits a DeprecationWarning that includes
    the function name and the extra 'message' text.
    """
    from functools import wraps

    def deprecated_decorator(func):
        @wraps(func)  # preserve the wrapped function's name/docstring for introspection
        def deprecated_func(*args, **kwargs):
            warnings.warn("{} is a deprecated function. {}".format(func.__name__, message),
                          category=DeprecationWarning,
                          stacklevel=2)
            warnings.simplefilter('default', DeprecationWarning)
            return func(*args, **kwargs)
        return deprecated_func
    return deprecated_decorator
+
+
def truncate(text, max_length=1024):
    """Shorten huge texts, keeping the head and tail around an ' ... ' marker.
    The input is stringified first; anything shorter than max_length is returned
    unchanged.
    """
    text = str(text)
    if not text or len(text) < max_length:
        return text
    head = text[:max_length // 2 - 3]
    tail = text[-max_length // 2 + 3:]
    return head + " ... " + tail
+
+
def merge_dicts(*dicts, **kwargs):
    """Build a fresh dict by merging N other dicts plus keyword arguments.
    Right-most dicts win on key clashes, and keyword args win over all of them.
    """
    merged = {}
    for source in dicts:
        merged.update(source)
    merged.update(kwargs)
    return merged
+
+
def remove_none_items(adict):
    """Return a shallow copy of adict where keys mapped to None are dropped"""
    filtered = {}
    for key, value in adict.items():
        if value is not None:
            filtered[key] = value
    return filtered
+
+
def filter_dict_keys(adict, allow):
    """Return a shallow copy of adict keeping only the explicitly allowed keys

    Arguments:
        adict (dict): Simple python dict data struct
        allow (list): Explicitly allowed keys
    """
    return {key: adict[key] for key in adict if key in allow}
+
+
def filter_out_dict_keys(adict, deny):
    """Return a shallow copy of adict without the explicitly denied keys

    Arguments:
        adict (dict): Simple python dict data struct
        deny (list): Explicitly denied keys
    """
    kept = {}
    for key, value in adict.items():
        if key in deny:
            continue
        kept[key] = value
    return kept
+
+
def expand_joined_fields(record):
    """Given a db query result, explode the fields that contain `.` (join
    operations) into nested dictionaries.

    Example
        >>> expand_joined_fields({'wim.id': 2})
        {'wim': {'id': 2}}
    """
    result = {}
    for field, value in record.items():
        keys = field.split('.')
        # walk/create the intermediate dicts, then set the leaf value
        target = result
        for key in keys[:-1]:
            target = target.setdefault(key, {})
        target[keys[-1]] = value

    return result
+
+
def ensure(condition, exception):
    """Raise the given exception when the condition does not hold"""
    if condition:
        return
    raise exception
+
+
def partition(predicate, iterable):
    """Split a single iterable into two lazy iterators.
    The first one yields the items for which 'predicate' is true, the second
    one yields those for which it is false.
    """
    copy_a, copy_b = tee(iterable)
    trues = filter(predicate, copy_b)
    falses = filterfalse(predicate, copy_a)
    return trues, falses
+
+
def pipe(*functions):
    """Compose one-argument functions left to right,
    so pipe(f, g)(x) == g(f(x))
    """
    def _pipeline(value):
        for function in functions:
            value = function(value)
        return value
    return _pipeline
+
+
def compose(*functions):
    """Compose one-argument functions right to left,
    so compose(f, g)(x) == f(g(x))
    """
    def _composed(value):
        for function in reversed(functions):
            value = function(value)
        return value
    return _composed
+
+
def safe_get(target, key_path, default=None):
    """Walk a nested dict following the dotted 'key_path' (e.g. "key1.key2.key3")
    and return the value found there, or 'default' when any step is missing.
    """
    parts = key_path.split('.')
    node = target
    for part in parts[:-1]:
        node = node.get(part) or {}
    return node.get(parts[-1], default)
+
+
class Attempt(object):
    """Helper object carrying the state of one retry of a failing procedure

    Attributes:
        count (int): 0-based "retries" counter
        max (int): maximum number of "retries" allowed
        info (dict): extra information about the specific attempt
            (can be used to produce more meaningful error messages)
    """
    __slots__ = ('count', 'max', 'info')

    MAX = 3

    def __init__(self, count=0, max_attempts=MAX, info=None):
        self.count = count
        self.max = max_attempts
        self.info = info if info else {}

    @property
    def countdown(self):
        """Number of attempts still remaining (count in the opposite direction)"""
        return self.max - self.count

    @property
    def number(self):
        """1-based counter"""
        return 1 + self.count
+
+
def inject_args(fn=None, **args):
    """Pre-bind keyword arguments on 'fn', but only those that the function
    defines in its signature in the first place.
    Usable as a decorator with or without parameters.
    """
    if fn is None:
        # called as @inject_args(a=1, ...): delay until the function arrives
        return partial(inject_args, **args)

    accepted = filter_dict_keys(args, getspec(fn).args)
    return wraps(fn)(partial(fn, **accepted))
+
+
def get_arg(name, fn, args, kwargs):
    """Find the value of an argument for a function, given its argument list.

    This function can be used to display more meaningful errors for debugging
    """
    if name in kwargs:
        return kwargs[name]

    spec_args = getspec(fn).args
    if name not in spec_args:
        return None
    position = spec_args.index(name)
    if position < len(args):
        return args[position]
    return None
diff --git a/RO/osm_ro/vim_thread.py b/RO/osm_ro/vim_thread.py
new file mode 100644 (file)
index 0000000..1e1e6d2
--- /dev/null
@@ -0,0 +1,1303 @@
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openvim
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+""""
+This is thread that interacts with a VIM. It processes TASKs sequentially against a single VIM.
+The tasks are stored at database in table vim_wim_actions
Several vim_wim_actions can refer to the same element at VIM (flavor, network, ...). This is something to avoid if RO
is migrated to a non-relational database such as mongo db. Each vim_wim_actions references a different instance_Xxxxx.
In this case the "related" column contains the same value, to know they refer to the same vim. In case of deletion, if
there are related tasks using this element, it is not deleted; the vim_info needed to delete it is transferred to another task.
+
+The task content is (M: stored at memory, D: stored at database):
+    MD  instance_action_id:  reference a global action over an instance-scenario: database instance_actions
+    MD  task_index:     index number of the task. This together with the previous forms a unique key identifier
+    MD  datacenter_vim_id:  should contain the uuid of the VIM managed by this thread
+    MD  vim_id:     id of the vm,net,etc at VIM
+    MD  item:       database table name, can be instance_vms, instance_nets, TODO: datacenter_flavors, datacenter_images
+    MD  item_id:    uuid of the referenced entry in the previous table
+    MD  action:     CREATE, DELETE, FIND
+    MD  status:     SCHEDULED: action need to be done
+                    BUILD: not used
+                    DONE: Done and it must be polled to VIM periodically to see status. ONLY for action=CREATE or FIND
+                    FAILED: It cannot be created/found/deleted
+                    FINISHED: similar to DONE, but no refresh is needed anymore. Task is maintained at database but
+                        it is never processed by any thread
                    SUPERSEDED: similar to FINISHED, but nothing has been done to complete the task.
+    MD  extra:      text with yaml format at database, dict at memory with:
+            params:     list with the params to be sent to the VIM for CREATE or FIND. For DELETE the vim_id is taken
+                        from other related tasks
+            find:       (only for CREATE tasks) if present it should FIND before creating and use if existing. Contains
+                        the FIND params
+            depends_on: list with the 'task_index'es of tasks that must be completed before. e.g. a vm creation depends
+                        on a net creation
                        can contain an int (single index on the same instance-action) or str (complete action ID)
+            sdn_net_id: used for net.
+            interfaces: used for VMs. Each key is the uuid of the instance_interfaces entry at database
                iface_id: uuid of instance_interfaces
+                sdn_port_id:
+                sdn_net_id:
+                vim_info
+            created_items: dictionary with extra elements created that need to be deleted. e.g. ports, volumes,...
+            created:    False if the VIM element is not created by other actions, and it should not be deleted
+            vim_status: VIM status of the element. Stored also at database in the instance_XXX
+            vim_info:   Detailed information of a vm/net from the VIM. Stored at database in the instance_XXX but not at
+                        vim_wim_actions
+    M   depends:    dict with task_index(from depends_on) to dependency task
+    M   params:     same as extra[params]
+    MD  error_msg:  descriptive text upon an error.Stored also at database instance_XXX
+    MD  created_at: task creation time. The task of creation must be the oldest
+    MD  modified_at: next time task need to be processed. For example, for a refresh, it contain next time refresh must
+                     be done
+    MD related:     All the tasks over the same VIM element have same "related". Note that other VIMs can contain the
                    same value of related, but this thread only processes the tasks of one VIM.  Also related can be the
                    same among several NS or instance-scenarios
+    MD worker:      Used to lock in case of several thread workers.
+
+"""
+
+import threading
+import time
+import queue
+import logging
+from osm_ro import vimconn
+import yaml
+from osm_ro.db_base import db_base_Exception
+# TODO py3 BEGIN
class ovimException(Exception):
    """Local stand-in for the openvim 'ovimException' class (see surrounding TODO py3
    markers); it keeps the except clauses below working until ovim is migrated to python3.
    """
    pass
+# TODO py3 END
+from copy import deepcopy
+
+__author__ = "Alfonso Tierno, Pablo Montes"
+__date__ = "$28-Sep-2017 12:07:15$"
+
+
def is_task_id(task_id):
    """Tell whether the given string is a task identifier (has the TASK- prefix)"""
    return task_id[:5] == "TASK-"
+
+
class VimThreadException(Exception):
    """Generic error raised while processing VIM tasks in a vim_thread"""
    pass
+
+
class VimThreadExceptionNotFound(VimThreadException):
    """Raised when the element a task refers to cannot be found"""
    pass
+
+
class vim_thread(threading.Thread):
    """Worker thread that sequentially processes vim_wim_actions tasks against a single VIM."""
    # re-poll periods (seconds) applied depending on the state of the VIM element
    REFRESH_BUILD = 5  # 5 seconds
    REFRESH_ACTIVE = 60  # 1 minute
    REFRESH_ERROR = 600
    REFRESH_DELETE = 3600 * 10
+
    def __init__(self, task_lock, plugins, name=None, datacenter_name=None, datacenter_tenant_id=None,
                 db=None, db_lock=None, ovim=None):
        """Init a thread.
        Arguments:
            'task_lock': lock shared among threads, used while manipulating tasks
            'plugins': dict of loaded vim plugins, looked up later as "rovim_<type>"
            'name': name of thread
            'datacenter_name', 'datacenter_tenant_id': identify the VIM account managed by this thread
            'db', 'db_lock': database class and lock to use it in exclusion
            'ovim': openvim library instance used for the SDN-assist calls
        """
        threading.Thread.__init__(self)
        self.plugins = plugins
        self.vim = None           # vimconnector instance, created later by get_vimconnector()
        self.error_status = None  # text explaining why the vimconnector could not be loaded
        self.datacenter_name = datacenter_name
        self.datacenter_tenant_id = datacenter_tenant_id
        self.ovim = ovim
        if not name:
            # NOTE(review): 'vimconn' here is the imported module, so subscripting it raises
            # TypeError; this fallback path looks broken — confirm the intended default name
            # (presumably built from datacenter/tenant identifiers)
            self.name = vimconn["id"] + "." + vimconn["config"]["datacenter_tenant_id"]
        else:
            self.name = name
        self.vim_persistent_info = {}
        self.my_id = self.name[:64]  # worker identifier used to lock tasks at database (truncated to 64 chars)

        self.logger = logging.getLogger('openmano.vim.' + self.name)
        self.db = db
        self.db_lock = db_lock

        self.task_lock = task_lock
        self.task_queue = queue.Queue(2000)
+
    def get_vimconnector(self):
        """Instantiate the vimconnector for the VIM account handled by this thread.
        Reads the VIM account row from database, merges the datacenter and the
        datacenter-tenant yaml configs, adds the SDN external port mappings and
        loads the matching "rovim_<type>" plugin.
        On error, self.vim is left as None and self.error_status keeps the reason.
        """
        try:
            from_ = "datacenter_tenants as dt join datacenters as d on dt.datacenter_id=d.uuid"
            select_ = ('type', 'd.config as config', 'd.uuid as datacenter_id', 'vim_url', 'vim_url_admin',
                       'd.name as datacenter_name', 'dt.uuid as datacenter_tenant_id',
                       'dt.vim_tenant_name as vim_tenant_name', 'dt.vim_tenant_id as vim_tenant_id',
                       'user', 'passwd', 'dt.config as dt_config')
            where_ = {"dt.uuid": self.datacenter_tenant_id}
            vims = self.db.get_rows(FROM=from_, SELECT=select_, WHERE=where_)
            vim = vims[0]
            vim_config = {}
            # tenant-level config (dt_config) overrides datacenter-level config
            if vim["config"]:
                vim_config.update(yaml.load(vim["config"], Loader=yaml.Loader))
            if vim["dt_config"]:
                vim_config.update(yaml.load(vim["dt_config"], Loader=yaml.Loader))
            vim_config['datacenter_tenant_id'] = vim.get('datacenter_tenant_id')
            vim_config['datacenter_id'] = vim.get('datacenter_id')

            # get port_mapping
            with self.db_lock:
                vim_config["wim_external_ports"] = self.ovim.get_of_port_mappings(
                    db_filter={"region": vim_config['datacenter_id'], "pci": None})

            self.vim = self.plugins["rovim_" + vim["type"]].vimconnector(
                uuid=vim['datacenter_id'], name=vim['datacenter_name'],
                tenant_id=vim['vim_tenant_id'], tenant_name=vim['vim_tenant_name'],
                url=vim['vim_url'], url_admin=vim['vim_url_admin'],
                user=vim['user'], passwd=vim['passwd'],
                config=vim_config, persistent_info=self.vim_persistent_info
            )
            self.error_status = None
        except Exception as e:
            self.logger.error("Cannot load vimconnector for vim_account {}: {}".format(self.datacenter_tenant_id, e))
            self.vim = None
            self.error_status = "Error loading vimconnector: {}".format(e)
+
    def _get_db_task(self):
        """
        Read and lock the next pending group of related actions from database.
        Selects the oldest SCHEDULED/BUILD/DONE action due for processing, marks the
        'worker' column of every action over the same VIM element with self.my_id
        (releasing the lock again if another worker raced us), and deserializes the
        yaml 'extra' column into the returned task.
        :return: tuple (task, related_tasks), or (None, None) if nothing to process
        """
        now = time.time()
        try:
            database_limit = 20
            task_related = None
            while True:
                # get 20 (database_limit) entries each time
                vim_actions = self.db.get_rows(FROM="vim_wim_actions",
                                               WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
                                                      "status": ['SCHEDULED', 'BUILD', 'DONE'],
                                                      "worker": [None, self.my_id], "modified_at<=": now
                                                      },
                                               ORDER_BY=("modified_at", "created_at",),
                                               LIMIT=database_limit)
                if not vim_actions:
                    return None, None
                # if vim_actions[0]["modified_at"] > now:
                #     return int(vim_actions[0] - now)
                for task in vim_actions:
                    # block related task
                    if task_related == task["related"]:
                        continue  # ignore if a locking has already tried for these task set
                    task_related = task["related"]
                    # lock ...
                    self.db.update_rows("vim_wim_actions", UPDATE={"worker": self.my_id}, modified_time=0,
                                        WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
                                               "status": ['SCHEDULED', 'BUILD', 'DONE', 'FAILED'],
                                               "worker": [None, self.my_id],
                                               "related": task_related,
                                               "item": task["item"],
                                               })
                    # ... and read all related and check if locked
                    related_tasks = self.db.get_rows(FROM="vim_wim_actions",
                                                     WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
                                                            "status": ['SCHEDULED', 'BUILD', 'DONE', 'FAILED'],
                                                            "related": task_related,
                                                            "item": task["item"],
                                                            },
                                                     ORDER_BY=("created_at",))
                    # check that all related tasks have been locked. If not release and try again. It can happen
                    # for race conditions if a new related task has been inserted by nfvo in the process
                    some_tasks_locked = False
                    some_tasks_not_locked = False
                    creation_task = None
                    for relate_task in related_tasks:
                        if relate_task["worker"] != self.my_id:
                            some_tasks_not_locked = True
                        else:
                            some_tasks_locked = True
                        if not creation_task and relate_task["action"] in ("CREATE", "FIND"):
                            creation_task = relate_task
                    if some_tasks_not_locked:
                        if some_tasks_locked:  # unlock
                            self.db.update_rows("vim_wim_actions", UPDATE={"worker": None}, modified_time=0,
                                                WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
                                                       "worker": self.my_id,
                                                       "related": task_related,
                                                       "item": task["item"],
                                                       })
                        continue

                    # task of creation must be the first in the list of related_task
                    assert(related_tasks[0]["action"] in ("CREATE", "FIND"))

                    # deserialize the yaml 'extra' column and expose its parts at task level
                    task["params"] = None
                    if task["extra"]:
                        extra = yaml.load(task["extra"], Loader=yaml.Loader)
                    else:
                        extra = {}
                    task["extra"] = extra
                    if extra.get("depends_on"):
                        task["depends"] = {}
                    if extra.get("params"):
                        task["params"] = deepcopy(extra["params"])
                    return task, related_tasks
        except Exception as e:
            self.logger.critical("Unexpected exception at _get_db_task: " + str(e), exc_info=True)
            return None, None
+
    def _delete_task(self, task):
        """
        Determine if this DELETE task must really be performed at VIM or just superseded.
        The related CREATE/FIND task is marked as FINISHED; when another task still uses
        the same VIM element, the creation information is transferred to it instead of
        deleting the element.
        :return: True if the VIM element must be deleted, False/None otherwise
        """

        def copy_extra_created(copy_to, copy_from):
            # transfer the creation information (sdn ids, interfaces, created_items) between task extras
            copy_to["created"] = copy_from["created"]
            if copy_from.get("sdn_net_id"):
                copy_to["sdn_net_id"] = copy_from["sdn_net_id"]
            if copy_from.get("interfaces"):
                copy_to["interfaces"] = copy_from["interfaces"]
            if copy_from.get("created_items"):
                if not copy_to.get("created_items"):
                    copy_to["created_items"] = {}
                copy_to["created_items"].update(copy_from["created_items"])

        task_create = None       # CREATE/FIND task over the same item/item_id
        dependency_task = None   # some other task still referring to the same VIM element
        deletion_needed = False  # True when the element was actually created at VIM
        if task["status"] == "FAILED":
            return   # TODO need to be retry??
        try:
            # get all related tasks
            related_tasks = self.db.get_rows(FROM="vim_wim_actions",
                                             WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
                                                    "status": ['SCHEDULED', 'BUILD', 'DONE', 'FAILED'],
                                                    "action": ["FIND", "CREATE"],
                                                    "related": task["related"],
                                                    },
                                             ORDER_BY=("created_at",),
                                             )
            for related_task in related_tasks:
                if related_task["item"] == task["item"] and related_task["item_id"] == task["item_id"]:
                    task_create = related_task
                    # TASK_CREATE
                    if related_task["extra"]:
                        extra_created = yaml.load(related_task["extra"], Loader=yaml.Loader)
                        if extra_created.get("created"):
                            deletion_needed = True
                        related_task["extra"] = extra_created
                elif not dependency_task:
                    dependency_task = related_task
                if task_create and dependency_task:
                    break

            # mark task_create as FINISHED
            # NOTE(review): if no matching CREATE/FIND task exists, task_create stays None and the
            # subscriptions below raise; the broad except turns that into a critical log — confirm
            self.db.update_rows("vim_wim_actions", UPDATE={"status": "FINISHED"},
                                WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
                                       "instance_action_id": task_create["instance_action_id"],
                                       "task_index": task_create["task_index"]
                                       })
            if not deletion_needed:
                return
            elif dependency_task:
                # move create information  from task_create to relate_task
                extra_new_created = yaml.load(dependency_task["extra"], Loader=yaml.Loader) or {}
                extra_new_created["created"] = extra_created["created"]
                copy_extra_created(copy_to=extra_new_created, copy_from=extra_created)

                self.db.update_rows("vim_wim_actions",
                                    UPDATE={"extra": yaml.safe_dump(extra_new_created, default_flow_style=True,
                                                                    width=256),
                                            "vim_id": task_create.get("vim_id")},
                                    WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
                                           "instance_action_id": dependency_task["instance_action_id"],
                                           "task_index": dependency_task["task_index"]
                                           })
                return False
            else:
                # last user of the element: hand the creation info to this DELETE task
                task["vim_id"] = task_create["vim_id"]
                copy_extra_created(copy_to=task["extra"], copy_from=task_create["extra"])
                return True

        except Exception as e:
            self.logger.critical("Unexpected exception at _delete_task: " + str(e), exc_info=True)
+
    def _refres_vm(self, task):
        """Call VIM to get the current status of a VM and compute the database update.
        When an interface reported by VIM differs from the one recorded in the task,
        its SDN external port is recreated and instance_interfaces is updated.
        :param task: in-memory task dict (CREATE/FIND over instance_vms)
        :return: dict with the fields to update at instance_vms, or None when unchanged
        """
        database_update = None

        vim_id = task["vim_id"]
        vm_to_refresh_list = [vim_id]
        try:
            vim_dict = self.vim.refresh_vms_status(vm_to_refresh_list)
            vim_info = vim_dict[vim_id]
        except vimconn.vimconnException as e:
            # Mark all tasks at VIM_ERROR status
            self.logger.error("task=several get-VM: vimconnException when trying to refresh vms " + str(e))
            vim_info = {"status": "VIM_ERROR", "error_msg": str(e)}

        task_id = task["instance_action_id"] + "." + str(task["task_index"])
        self.logger.debug("task={} get-VM: vim_vm_id={} result={}".format(task_id, task["vim_id"], vim_info))

        # check and update interfaces
        task_warning_msg = ""
        for interface in vim_info.get("interfaces", ()):
            vim_interface_id = interface["vim_interface_id"]
            if vim_interface_id not in task["extra"]["interfaces"]:
                self.logger.critical("task={} get-VM: Interface not found {} on task info {}".format(
                    task_id, vim_interface_id, task["extra"]["interfaces"]), exc_info=True)
                continue
            task_interface = task["extra"]["interfaces"][vim_interface_id]
            task_vim_interface = task_interface.get("vim_info")
            if task_vim_interface != interface:
                # interface changed at VIM side: recreate its SDN port and refresh the database
                # delete old port
                if task_interface.get("sdn_port_id"):
                    try:
                        with self.db_lock:
                            self.ovim.delete_port(task_interface["sdn_port_id"], idempotent=True)
                            task_interface["sdn_port_id"] = None
                    except ovimException as e:
                        error_text = "ovimException deleting external_port={}: {}".format(
                            task_interface["sdn_port_id"], e)
                        self.logger.error("task={} get-VM: {}".format(task_id, error_text), exc_info=True)
                        task_warning_msg += error_text
                        # TODO Set error_msg at instance_nets instead of instance VMs

                # Create SDN port
                sdn_net_id = task_interface.get("sdn_net_id")
                if sdn_net_id and interface.get("compute_node") and interface.get("pci"):
                    sdn_port_name = sdn_net_id + "." + task["vim_id"]
                    sdn_port_name = sdn_port_name[:63]
                    try:
                        with self.db_lock:
                            # NOTE(review): self.vim is the vimconnector instance created at
                            # get_vimconnector(), not a dict; subscripting self.vim["config"] below
                            # looks broken — confirm (the value likely belongs to the vim_config dict)
                            sdn_port_id = self.ovim.new_external_port(
                                {"compute_node": interface["compute_node"],
                                    "pci": interface["pci"],
                                    "vlan": interface.get("vlan"),
                                    "net_id": sdn_net_id,
                                    "region": self.vim["config"]["datacenter_id"],
                                    "name": sdn_port_name,
                                    "mac": interface.get("mac_address")})
                            task_interface["sdn_port_id"] = sdn_port_id
                    except (ovimException, Exception) as e:
                        error_text = "ovimException creating new_external_port compute_node={} pci={} vlan={} {}".\
                            format(interface["compute_node"], interface["pci"], interface.get("vlan"), e)
                        self.logger.error("task={} get-VM: {}".format(task_id, error_text), exc_info=True)
                        task_warning_msg += error_text
                        # TODO Set error_msg at instance_nets instead of instance VMs

                self.db.update_rows('instance_interfaces',
                                    UPDATE={"mac_address": interface.get("mac_address"),
                                            "ip_address": interface.get("ip_address"),
                                            "vim_interface_id": interface.get("vim_interface_id"),
                                            "vim_info": interface.get("vim_info"),
                                            "sdn_port_id": task_interface.get("sdn_port_id"),
                                            "compute_node": interface.get("compute_node"),
                                            "pci": interface.get("pci"),
                                            "vlan": interface.get("vlan")},
                                    WHERE={'uuid': task_interface["iface_id"]})
                task_interface["vim_info"] = interface

        # check and update task and instance_vms database
        vim_info_error_msg = None
        if vim_info.get("error_msg"):
            vim_info_error_msg = self._format_vim_error_msg(vim_info["error_msg"] + task_warning_msg)
        elif task_warning_msg:
            vim_info_error_msg = self._format_vim_error_msg(task_warning_msg)
        task_vim_info = task["extra"].get("vim_info")
        task_error_msg = task.get("error_msg")
        task_vim_status = task["extra"].get("vim_status")
        if task_vim_status != vim_info["status"] or task_error_msg != vim_info_error_msg or \
                (vim_info.get("vim_info") and task_vim_info != vim_info["vim_info"]):
            # something changed: refresh both the in-memory task and the returned database update
            database_update = {"status": vim_info["status"], "error_msg": vim_info_error_msg}
            if vim_info.get("vim_info"):
                database_update["vim_info"] = vim_info["vim_info"]

            task["extra"]["vim_status"] = vim_info["status"]
            task["error_msg"] = vim_info_error_msg
            if vim_info.get("vim_info"):
                task["extra"]["vim_info"] = vim_info["vim_info"]

        return database_update
+
    def _refres_net(self, task):
        """Call VIM to get network status and merge it with the SDN (ovim) status.

        NOTE(review): name looks like a typo of "_refresh_net"; kept as-is because
        callers reference it by this name.

        :param task: refresh task over an 'instance_nets' item; task["vim_id"] is the
            VIM network id and task["extra"] may hold a previously stored sdn_net_id
        :return: dict with the columns to update at the instance_nets table, or None
            when nothing changed since the last refresh
        """
        database_update = None

        vim_id = task["vim_id"]
        net_to_refresh_list = [vim_id]
        try:
            vim_dict = self.vim.refresh_nets_status(net_to_refresh_list)
            vim_info = vim_dict[vim_id]
        except vimconn.vimconnException as e:
            # Mark all tasks at VIM_ERROR status
            self.logger.error("task=several get-net: vimconnException when trying to refresh nets " + str(e))
            vim_info = {"status": "VIM_ERROR", "error_msg": str(e)}

        task_id = task["instance_action_id"] + "." + str(task["task_index"])
        self.logger.debug("task={} get-net: vim_net_id={} result={}".format(task_id, task["vim_id"], vim_info))

        # snapshot of what the task currently stores, to detect changes below
        task_vim_info = task["extra"].get("vim_info")
        task_vim_status = task["extra"].get("vim_status")
        task_error_msg = task.get("error_msg")
        task_sdn_net_id = task["extra"].get("sdn_net_id")

        vim_info_status = vim_info["status"]
        vim_info_error_msg = vim_info.get("error_msg")
        # get ovim status
        if task_sdn_net_id:
            try:
                with self.db_lock:
                    sdn_net = self.ovim.show_network(task_sdn_net_id)
            except (ovimException, Exception) as e:
                # any failure querying ovim is folded into an ERROR pseudo-status
                text_error = "ovimException getting network snd_net_id={}: {}".format(task_sdn_net_id, e)
                self.logger.error("task={} get-net: {}".format(task_id, text_error), exc_info=True)
                sdn_net = {"status": "ERROR", "last_error": text_error}
            if sdn_net["status"] == "ERROR":
                if not vim_info_error_msg:
                    vim_info_error_msg = str(sdn_net.get("last_error"))
                else:
                    # combine both error sources, each truncated so the result fits
                    # the 1024-char database column (see _format_vim_error_msg)
                    vim_info_error_msg = "VIM_ERROR: {} && SDN_ERROR: {}".format(
                        self._format_vim_error_msg(vim_info_error_msg, 1024 // 2 - 14),
                        self._format_vim_error_msg(sdn_net["last_error"], 1024 // 2 - 14))
                vim_info_status = "ERROR"
            elif sdn_net["status"] == "BUILD":
                # SDN still building downgrades an ACTIVE VIM status to BUILD
                if vim_info_status == "ACTIVE":
                    vim_info_status = "BUILD"

        # update database
        if vim_info_error_msg:
            vim_info_error_msg = self._format_vim_error_msg(vim_info_error_msg)
        # only report an update when status, error or vim_info actually changed
        if task_vim_status != vim_info_status or task_error_msg != vim_info_error_msg or \
                (vim_info.get("vim_info") and task_vim_info != vim_info["vim_info"]):
            task["extra"]["vim_status"] = vim_info_status
            task["error_msg"] = vim_info_error_msg
            if vim_info.get("vim_info"):
                task["extra"]["vim_info"] = vim_info["vim_info"]
            database_update = {"status": vim_info_status, "error_msg": vim_info_error_msg}
            if vim_info.get("vim_info"):
                database_update["vim_info"] = vim_info["vim_info"]
        return database_update
+
    def _proccess_pending_tasks(self, task, related_tasks):
        """Process one locked task and persist the outcome.

        Steps: (1) for SCHEDULED tasks, resolve dependencies (reschedule if a
        dependency is not done, fail if a dependency failed); (2) dispatch by
        item/action to the create/find/refresh/delete handlers; (3) update the
        vim_wim_actions rows (this task, related FIND/CREATE tasks), unlock the
        'related' group, bump instance_actions counters and update the instance
        table pointed by task["item"].

        NOTE(review): name looks like a typo of "_process_pending_tasks"; kept
        because callers reference it by this name.

        :param task: task dict loaded from vim_wim_actions, already owned by this worker
        :param related_tasks: tasks sharing the same "related" group; element 0 is the
            reference task whose result is copied into duplicated FIND/CREATE tasks
        :return: None; results are written to the database
        """
        old_task_status = task["status"]
        create_or_find = False   # if as result of processing this task something is created or found
        next_refresh = 0

        try:
            if task["status"] == "SCHEDULED":
                # check if tasks that this depends on have been completed
                dependency_not_completed = False
                dependency_modified_at = 0
                for task_index in task["extra"].get("depends_on", ()):
                    task_dependency = self._look_for_task(task["instance_action_id"], task_index)
                    if not task_dependency:
                        raise VimThreadException(
                            "Cannot get depending net task trying to get depending task {}.{}".format(
                                task["instance_action_id"], task_index))
                    # task["depends"]["TASK-" + str(task_index)] = task_dependency #it references another object,so
                    # database must be look again
                    if task_dependency["status"] == "SCHEDULED":
                        dependency_not_completed = True
                        dependency_modified_at = task_dependency["modified_at"]
                        break
                    elif task_dependency["status"] == "FAILED":
                        raise VimThreadException(
                            "Cannot {} {}, (task {}.{}) because depends on failed {}.{}, (task{}.{}): {}".format(
                                task["action"], task["item"],
                                task["instance_action_id"], task["task_index"],
                                task_dependency["instance_action_id"], task_dependency["task_index"],
                                task_dependency["action"], task_dependency["item"], task_dependency.get("error_msg")))

                    # expose the resolved dependency under both accepted key formats
                    task["depends"]["TASK-"+str(task_index)] = task_dependency
                    task["depends"]["TASK-{}.{}".format(task["instance_action_id"], task_index)] = task_dependency
                if dependency_not_completed:
                    # Move this task to the time dependency is going to be modified plus 10 seconds.
                    self.db.update_rows("vim_wim_actions", modified_time=dependency_modified_at + 10,
                                        UPDATE={"worker": None},
                                        WHERE={"datacenter_vim_id": self.datacenter_tenant_id, "worker": self.my_id,
                                               "related": task["related"],
                                               })
                    # task["extra"]["tries"] = task["extra"].get("tries", 0) + 1
                    # if task["extra"]["tries"] > 3:
                    #     raise VimThreadException(
                    #         "Cannot {} {}, (task {}.{}) because timeout waiting to complete {} {}, "
                    #         "(task {}.{})".format(task["action"], task["item"],
                    #                               task["instance_action_id"], task["task_index"],
                    #                               task_dependency["instance_action_id"], task_dependency["task_index"]
                    #                               task_dependency["action"], task_dependency["item"]))
                    return

            database_update = None
            if task["action"] == "DELETE":
                deleted_needed = self._delete_task(task)
                if not deleted_needed:
                    task["status"] = "SUPERSEDED"  # with FINISHED instead of DONE it will not be refreshing
                    task["error_msg"] = None

            if task["status"] == "SUPERSEDED":
                # not needed to do anything but update database with the new status
                database_update = None
            elif not self.vim:
                # could not get a vimconnector (see get_vimconnector); fail with the stored reason
                task["status"] = "FAILED"
                task["error_msg"] = self.error_status
                database_update = {"status": "VIM_ERROR", "error_msg": task["error_msg"]}
            elif task["item_id"] != related_tasks[0]["item_id"] and task["action"] in ("FIND", "CREATE"):
                # Do nothing, just copy values from one to another and updata database
                task["status"] = related_tasks[0]["status"]
                task["error_msg"] = related_tasks[0]["error_msg"]
                task["vim_id"] = related_tasks[0]["vim_id"]
                # NOTE(review): yaml.Loader on DB-stored text; assumed trusted as it is
                # written by this same module with yaml.safe_dump
                extra = yaml.load(related_tasks[0]["extra"], Loader=yaml.Loader)
                task["extra"]["vim_status"] = extra.get("vim_status")
                next_refresh = related_tasks[0]["modified_at"] + 0.001
                database_update = {"status": task["extra"].get("vim_status", "VIM_ERROR"),
                                   "error_msg": task["error_msg"]}
                if task["item"] == 'instance_vms':
                    database_update["vim_vm_id"] = task["vim_id"]
                elif task["item"] == 'instance_nets':
                    database_update["vim_net_id"] = task["vim_id"]
            elif task["item"] == 'instance_vms':
                # BUILD/DONE FIND-or-CREATE tasks are periodic refreshes, not new actions
                if task["status"] in ('BUILD', 'DONE') and task["action"] in ("FIND", "CREATE"):
                    database_update = self._refres_vm(task)
                    create_or_find = True
                elif task["action"] == "CREATE":
                    create_or_find = True
                    database_update = self.new_vm(task)
                elif task["action"] == "DELETE":
                    self.del_vm(task)
                else:
                    raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
            elif task["item"] == 'instance_nets':
                if task["status"] in ('BUILD', 'DONE') and task["action"] in ("FIND", "CREATE"):
                    database_update = self._refres_net(task)
                    create_or_find = True
                elif task["action"] == "CREATE":
                    create_or_find = True
                    database_update = self.new_net(task)
                elif task["action"] == "DELETE":
                    self.del_net(task)
                elif task["action"] == "FIND":
                    database_update = self.get_net(task)
                else:
                    raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
            elif task["item"] == 'instance_sfis':
                if task["action"] == "CREATE":
                    create_or_find = True
                    database_update = self.new_sfi(task)
                elif task["action"] == "DELETE":
                    self.del_sfi(task)
                else:
                    raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
            elif task["item"] == 'instance_sfs':
                if task["action"] == "CREATE":
                    create_or_find = True
                    database_update = self.new_sf(task)
                elif task["action"] == "DELETE":
                    self.del_sf(task)
                else:
                    raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
            elif task["item"] == 'instance_classifications':
                if task["action"] == "CREATE":
                    create_or_find = True
                    database_update = self.new_classification(task)
                elif task["action"] == "DELETE":
                    self.del_classification(task)
                else:
                    raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
            elif task["item"] == 'instance_sfps':
                if task["action"] == "CREATE":
                    create_or_find = True
                    database_update = self.new_sfp(task)
                elif task["action"] == "DELETE":
                    self.del_sfp(task)
                else:
                    raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
            else:
                raise vimconn.vimconnException(self.name + "unknown task item {}".format(task["item"]))
                # TODO
        except VimThreadException as e:
            task["error_msg"] = str(e)
            task["status"] = "FAILED"
            database_update = {"status": "VIM_ERROR", "error_msg": task["error_msg"]}
            if task["item"] == 'instance_vms':
                database_update["vim_vm_id"] = None
            elif task["item"] == 'instance_nets':
                database_update["vim_net_id"] = None

        task_id = task["instance_action_id"] + "." + str(task["task_index"])
        self.logger.debug("task={} item={} action={} result={}:'{}' params={}".format(
            task_id, task["item"], task["action"], task["status"],
            task["vim_id"] if task["status"] == "DONE" else task.get("error_msg"), task["params"]))
        try:
            # schedule next refresh depending on the resulting VIM status
            if not next_refresh:
                if task["status"] == "DONE":
                    next_refresh = time.time()
                    if task["extra"].get("vim_status") == "BUILD":
                        next_refresh += self.REFRESH_BUILD
                    elif task["extra"].get("vim_status") in ("ERROR", "VIM_ERROR"):
                        next_refresh += self.REFRESH_ERROR
                    elif task["extra"].get("vim_status") == "DELETED":
                        next_refresh += self.REFRESH_DELETE
                    else:
                        next_refresh += self.REFRESH_ACTIVE
                elif task["status"] == "FAILED":
                    next_refresh = time.time() + self.REFRESH_DELETE

            if create_or_find:
                # modify all related task with action FIND/CREATED non SCHEDULED
                self.db.update_rows(
                    table="vim_wim_actions", modified_time=next_refresh + 0.001,
                    UPDATE={"status": task["status"], "vim_id": task.get("vim_id"),
                            "error_msg": task["error_msg"],
                            },

                    WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
                           "worker": self.my_id,
                           "action": ["FIND", "CREATE"],
                           "related": task["related"],
                           "status<>": "SCHEDULED",
                           })
            # modify own task
            self.db.update_rows(
                table="vim_wim_actions", modified_time=next_refresh,
                UPDATE={"status": task["status"], "vim_id": task.get("vim_id"),
                        "error_msg": task["error_msg"],
                        "extra": yaml.safe_dump(task["extra"], default_flow_style=True, width=256)},
                WHERE={"instance_action_id": task["instance_action_id"], "task_index": task["task_index"]})
            # Unlock tasks
            self.db.update_rows(
                table="vim_wim_actions", modified_time=0,
                UPDATE={"worker": None},
                WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
                       "worker": self.my_id,
                       "related": task["related"],
                       })

            # Update table instance_actions
            if old_task_status == "SCHEDULED" and task["status"] != old_task_status:
                self.db.update_rows(
                    table="instance_actions",
                    UPDATE={("number_failed" if task["status"] == "FAILED" else "number_done"): {"INCREMENT": 1}},
                    WHERE={"uuid": task["instance_action_id"]})
            if database_update:
                where_filter = {"related": task["related"]}
                if task["item"] == "instance_nets" and task["datacenter_vim_id"]:
                    where_filter["datacenter_tenant_id"] = task["datacenter_vim_id"]
                self.db.update_rows(table=task["item"],
                                    UPDATE=database_update,
                                    WHERE=where_filter)
        except db_base_Exception as e:
            self.logger.error("task={} Error updating database {}".format(task_id, e), exc_info=True)
+
+    def insert_task(self, task):
+        try:
+            self.task_queue.put(task, False)
+            return None
+        except queue.Full:
+            raise vimconn.vimconnException(self.name + ": timeout inserting a task")
+
+    def del_task(self, task):
+        with self.task_lock:
+            if task["status"] == "SCHEDULED":
+                task["status"] = "SUPERSEDED"
+                return True
+            else:  # task["status"] == "processing"
+                self.task_lock.release()
+                return False
+
+    def run(self):
+        self.logger.debug("Starting")
+        while True:
+            self.get_vimconnector()
+            self.logger.debug("Vimconnector loaded")
+            reload_thread = False
+
+            while True:
+                try:
+                    while not self.task_queue.empty():
+                        task = self.task_queue.get()
+                        if isinstance(task, list):
+                            pass
+                        elif isinstance(task, str):
+                            if task == 'exit':
+                                return 0
+                            elif task == 'reload':
+                                reload_thread = True
+                                break
+                        self.task_queue.task_done()
+                    if reload_thread:
+                        break
+
+                    task, related_tasks = self._get_db_task()
+                    if task:
+                        self._proccess_pending_tasks(task, related_tasks)
+                    else:
+                        time.sleep(5)
+
+                except Exception as e:
+                    self.logger.critical("Unexpected exception at run: " + str(e), exc_info=True)
+
+        self.logger.debug("Finishing")
+
+    def _look_for_task(self, instance_action_id, task_id):
+        """
+        Look for a concrete task at vim_actions database table
+        :param instance_action_id: The instance_action_id
+        :param task_id: Can have several formats:
+            <task index>: integer
+            TASK-<task index> :backward compatibility,
+            [TASK-]<instance_action_id>.<task index>: this instance_action_id overrides the one in the parameter
+        :return: Task dictionary or None if not found
+        """
+        if isinstance(task_id, int):
+            task_index = task_id
+        else:
+            if task_id.startswith("TASK-"):
+                task_id = task_id[5:]
+            ins_action_id, _, task_index = task_id.rpartition(".")
+            if ins_action_id:
+                instance_action_id = ins_action_id
+
+        tasks = self.db.get_rows(FROM="vim_wim_actions", WHERE={"instance_action_id": instance_action_id,
+                                                                "task_index": task_index})
+        if not tasks:
+            return None
+        task = tasks[0]
+        task["params"] = None
+        task["depends"] = {}
+        if task["extra"]:
+            extra = yaml.load(task["extra"], Loader=yaml.Loader)
+            task["extra"] = extra
+            task["params"] = extra.get("params")
+        else:
+            task["extra"] = {}
+        return task
+
+    @staticmethod
+    def _format_vim_error_msg(error_text, max_length=1024):
+        if error_text and len(error_text) >= max_length:
+            return error_text[:max_length // 2 - 3] + " ... " + error_text[-max_length // 2 + 3:]
+        return error_text
+
    def new_vm(self, task):
        """CREATE action over instance_vms: create the VM at the VIM.

        Resolves net ids that are still task references into real VIM network
        ids, calls vimconnector.new_vminstance, and stores per-interface data
        (sdn_net_id, interface_id looked up at the database) in task["extra"]
        for later use by the refresh and delete handlers.

        :param task: task dict; task["params"] holds the positional arguments
            for new_vminstance, where index 5 is the net_list (list of dicts
            with at least "net_id", "vim_id" and "uuid")
        :return: dict with the columns to update at the instance_vms row
            (BUILD on success, VIM_ERROR on failure)
        """
        task_id = task["instance_action_id"] + "." + str(task["task_index"])
        try:
            params = task["params"]
            depends = task.get("depends")
            net_list = params[5]
            for net in net_list:
                if "net_id" in net and is_task_id(net["net_id"]):  # change task_id into network_id
                    network_id = task["depends"][net["net_id"]].get("vim_id")
                    if not network_id:
                        raise VimThreadException(
                            "Cannot create VM because depends on a network not created or found: " +
                            str(depends[net["net_id"]]["error_msg"]))
                    net["net_id"] = network_id
            # deepcopy so the vimconnector cannot mutate the stored task params
            params_copy = deepcopy(params)
            vim_vm_id, created_items = self.vim.new_vminstance(*params_copy)

            # fill task_interfaces. Look for snd_net_id at database for each interface
            task_interfaces = {}
            for iface in params_copy[5]:
                task_interfaces[iface["vim_id"]] = {"iface_id": iface["uuid"]}
                result = self.db.get_rows(
                    SELECT=('sdn_net_id', 'interface_id'),
                    FROM='instance_nets as ine join instance_interfaces as ii on ii.instance_net_id=ine.uuid',
                    WHERE={'ii.uuid': iface["uuid"]})
                if result:
                    task_interfaces[iface["vim_id"]]["sdn_net_id"] = result[0]['sdn_net_id']
                    task_interfaces[iface["vim_id"]]["interface_id"] = result[0]['interface_id']
                else:
                    # missing DB row: log but keep going; the interface just lacks SDN info
                    self.logger.critical("task={} new-VM: instance_nets uuid={} not found at DB".format(task_id,
                                                                                                        iface["uuid"]),
                                         exc_info=True)

            task["vim_info"] = {}
            task["extra"]["interfaces"] = task_interfaces
            task["extra"]["created"] = True
            task["extra"]["created_items"] = created_items
            task["extra"]["vim_status"] = "BUILD"
            task["error_msg"] = None
            task["status"] = "DONE"
            task["vim_id"] = vim_vm_id
            instance_element_update = {"status": "BUILD", "vim_vm_id": vim_vm_id, "error_msg": None}
            return instance_element_update

        except (vimconn.vimconnException, VimThreadException) as e:
            self.logger.error("task={} new-VM: {}".format(task_id, e))
            error_text = self._format_vim_error_msg(str(e))
            task["error_msg"] = error_text
            task["status"] = "FAILED"
            task["vim_id"] = None
            instance_element_update = {"status": "VIM_ERROR", "vim_vm_id": None, "error_msg": error_text}
            return instance_element_update
+
+    def del_vm(self, task):
+        task_id = task["instance_action_id"] + "." + str(task["task_index"])
+        vm_vim_id = task["vim_id"]
+        interfaces = task["extra"].get("interfaces", ())
+        try:
+            for iface in interfaces.values():
+                if iface.get("sdn_port_id"):
+                    try:
+                        with self.db_lock:
+                            self.ovim.delete_port(iface["sdn_port_id"], idempotent=True)
+                    except ovimException as e:
+                        self.logger.error("task={} del-VM: ovimException when deleting external_port={}: {} ".format(
+                            task_id, iface["sdn_port_id"], e), exc_info=True)
+                        # TODO Set error_msg at instance_nets
+
+            self.vim.delete_vminstance(vm_vim_id, task["extra"].get("created_items"))
+            task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
+            task["error_msg"] = None
+            return None
+
+        except vimconn.vimconnException as e:
+            task["error_msg"] = self._format_vim_error_msg(str(e))
+            if isinstance(e, vimconn.vimconnNotFoundException):
+                # If not found mark as Done and fill error_msg
+                task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
+                return None
+            task["status"] = "FAILED"
+            return None
+
+    def _get_net_internal(self, task, filter_param):
+        """
+        Common code for get_net and new_net. It looks for a network on VIM with the filter_params
+        :param task: task for this find or find-or-create action
+        :param filter_param: parameters to send to the vimconnector
+        :return: a dict with the content to update the instance_nets database table. Raises an exception on error, or
+            when network is not found or found more than one
+        """
+        vim_nets = self.vim.get_network_list(filter_param)
+        if not vim_nets:
+            raise VimThreadExceptionNotFound("Network not found with this criteria: '{}'".format(filter_param))
+        elif len(vim_nets) > 1:
+            raise VimThreadException("More than one network found with this criteria: '{}'".format(filter_param))
+        vim_net_id = vim_nets[0]["id"]
+
+        # Discover if this network is managed by a sdn controller
+        sdn_net_id = None
+        result = self.db.get_rows(SELECT=('sdn_net_id',), FROM='instance_nets',
+                                  WHERE={'vim_net_id': vim_net_id, 'datacenter_tenant_id': self.datacenter_tenant_id},
+                                  ORDER="instance_scenario_id")
+        if result:
+            sdn_net_id = result[0]['sdn_net_id']
+
+        task["status"] = "DONE"
+        task["extra"]["vim_info"] = {}
+        task["extra"]["created"] = False
+        task["extra"]["vim_status"] = "BUILD"
+        task["extra"]["sdn_net_id"] = sdn_net_id
+        task["error_msg"] = None
+        task["vim_id"] = vim_net_id
+        instance_element_update = {"vim_net_id": vim_net_id, "created": False, "status": "BUILD",
+                                   "error_msg": None, "sdn_net_id": sdn_net_id}
+        return instance_element_update
+
+    def get_net(self, task):
+        task_id = task["instance_action_id"] + "." + str(task["task_index"])
+        try:
+            params = task["params"]
+            filter_param = params[0]
+            instance_element_update = self._get_net_internal(task, filter_param)
+            return instance_element_update
+
+        except (vimconn.vimconnException, VimThreadException) as e:
+            self.logger.error("task={} get-net: {}".format(task_id, e))
+            task["status"] = "FAILED"
+            task["vim_id"] = None
+            task["error_msg"] = self._format_vim_error_msg(str(e))
+            instance_element_update = {"vim_net_id": None, "status": "VIM_ERROR",
+                                       "error_msg": task["error_msg"]}
+            return instance_element_update
+
    def new_net(self, task):
        """FIND-or-CREATE action over instance_nets.

        First tries to find the network at VIM when task["extra"]["find"] is
        set; if not found, creates it. For data/ptp networks on a VIM with an
        'sdn-controller' configured, it also creates the paired SDN (ovim)
        network, and optionally attaches a WIM external port.

        :param task: task dict; task["params"] holds the positional arguments
            for vimconnector.new_network (name, type, ip profile) and may carry
            a wim account name at index 3
        :return: dict with the columns to update at the instance_nets row
            (BUILD on success, VIM_ERROR on failure)
        """
        vim_net_id = None
        sdn_net_id = None
        task_id = task["instance_action_id"] + "." + str(task["task_index"])
        # action_text tracks the current phase so failures report where they happened
        action_text = ""
        try:
            # FIND
            if task["extra"].get("find"):
                action_text = "finding"
                filter_param = task["extra"]["find"][0]
                try:
                    instance_element_update = self._get_net_internal(task, filter_param)
                    return instance_element_update
                except VimThreadExceptionNotFound:
                    # not found: fall through to creation
                    pass
            # CREATE
            params = task["params"]
            action_text = "creating VIM"
            vim_net_id, created_items = self.vim.new_network(*params[0:3])

            net_name = params[0]
            net_type = params[1]
            wim_account_name = None
            if len(params) >= 4:
                wim_account_name = params[3]

            sdn_controller = self.vim.config.get('sdn-controller')
            if sdn_controller and (net_type == "data" or net_type == "ptp"):
                # NOTE(review): self.vim["config"] (subscript) vs self.vim.config
                # (attribute) are both used here — assumed equivalent accessors on
                # the vimconnector; confirm against the vimconn base class
                network = {"name": net_name, "type": net_type, "region": self.vim["config"]["datacenter_id"]}

                vim_net = self.vim.get_network(vim_net_id)
                if vim_net.get('encapsulation') != 'vlan':
                    raise vimconn.vimconnException(
                        "net '{}' defined as type '{}' has not vlan encapsulation '{}'".format(
                            net_name, net_type, vim_net['encapsulation']))
                network["vlan"] = vim_net.get('segmentation_id')
                action_text = "creating SDN"
                with self.db_lock:
                    sdn_net_id = self.ovim.new_network(network)

                if wim_account_name and self.vim.config["wim_external_ports"]:
                    # add external port to connect WIM. Try with compute node __WIM:wim_name and __WIM
                    action_text = "attaching external port to ovim network"
                    sdn_port_name = "external_port"
                    sdn_port_data = {
                        "compute_node": "__WIM:" + wim_account_name[0:58],
                        "pci": None,
                        "vlan": network["vlan"],
                        "net_id": sdn_net_id,
                        "region": self.vim["config"]["datacenter_id"],
                        "name": sdn_port_name,
                    }
                    try:
                        with self.db_lock:
                            sdn_external_port_id = self.ovim.new_external_port(sdn_port_data)
                    except ovimException:
                        # retry with the generic "__WIM" compute node name
                        sdn_port_data["compute_node"] = "__WIM"
                        with self.db_lock:
                            sdn_external_port_id = self.ovim.new_external_port(sdn_port_data)
                    self.logger.debug("Added sdn_external_port {} to sdn_network {}".format(sdn_external_port_id,
                                                                                            sdn_net_id))
            task["status"] = "DONE"
            task["extra"]["vim_info"] = {}
            task["extra"]["sdn_net_id"] = sdn_net_id
            task["extra"]["vim_status"] = "BUILD"
            task["extra"]["created"] = True
            task["extra"]["created_items"] = created_items
            task["error_msg"] = None
            task["vim_id"] = vim_net_id
            instance_element_update = {"vim_net_id": vim_net_id, "sdn_net_id": sdn_net_id, "status": "BUILD",
                                       "created": True, "error_msg": None}
            return instance_element_update
        except (vimconn.vimconnException, ovimException) as e:
            self.logger.error("task={} new-net: Error {}: {}".format(task_id, action_text, e))
            task["status"] = "FAILED"
            # keep whatever ids were created so a later DELETE can clean them up
            task["vim_id"] = vim_net_id
            task["error_msg"] = self._format_vim_error_msg(str(e))
            task["extra"]["sdn_net_id"] = sdn_net_id
            instance_element_update = {"vim_net_id": vim_net_id, "sdn_net_id": sdn_net_id, "status": "VIM_ERROR",
                                       "error_msg": task["error_msg"]}
            return instance_element_update
+
+    def del_net(self, task):
+        """Delete a network at the VIM and, if present, its associated SDN network and attached external ports.
+        :param task: task dictionary; "status"/"error_msg" are updated in place
+        :return: None always (callers look at the task status, not the return value)
+        """
+        net_vim_id = task["vim_id"]
+        sdn_net_id = task["extra"].get("sdn_net_id")
+        try:
+            if net_vim_id:
+                self.vim.delete_network(net_vim_id, task["extra"].get("created_items"))
+            if sdn_net_id:
+                # Delete any attached port to this sdn network. There can be ports associated to this network in case
+                # it was manually done using 'openmano vim-net-sdn-attach'
+                with self.db_lock:
+                    port_list = self.ovim.get_ports(columns={'uuid'},
+                                                    filter={'name': 'external_port', 'net_id': sdn_net_id})
+                    for port in port_list:
+                        self.ovim.delete_port(port['uuid'], idempotent=True)
+                    self.ovim.delete_network(sdn_net_id, idempotent=True)
+            task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
+            task["error_msg"] = None
+            return None
+        except ovimException as e:
+            task["error_msg"] = self._format_vim_error_msg("ovimException obtaining and deleting external "
+                                                           "ports for net {}: {}".format(sdn_net_id, str(e)))
+        except vimconn.vimconnException as e:
+            task["error_msg"] = self._format_vim_error_msg(str(e))
+            if isinstance(e, vimconn.vimconnNotFoundException):
+                # If not found mark as Done and fill error_msg
+                task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
+                return None
+        # any other error: leave the task failed so it can be inspected/retried
+        task["status"] = "FAILED"
+        return None
+
+    # Service Function Instances
+    def new_sfi(self, task):
+        """Create a Service Function Instance at the VIM, using the ingress/egress VIM interfaces of the
+        depended-on VM deployment task.
+        :param task: task dictionary; "status"/"vim_id"/"error_msg"/"extra" are updated in place
+        :return: dict with the fields to update at the instance database entry, or None when the
+                 ingress/egress interfaces cannot be resolved
+        """
+        vim_sfi_id = None
+        try:
+            # Waits for interfaces to be ready (avoids failure)
+            time.sleep(1)
+            dep_id = "TASK-" + str(task["extra"]["depends_on"][0])
+            task_id = task["instance_action_id"] + "." + str(task["task_index"])
+            error_text = ""
+            # mapping of VIM interface id -> interface data, filled by the depended-on VM task
+            interfaces = task["depends"][dep_id]["extra"].get("interfaces")
+
+            ingress_interface_id = task.get("extra").get("params").get("ingress_interface_id")
+            egress_interface_id = task.get("extra").get("params").get("egress_interface_id")
+            ingress_vim_interface_id = None
+            egress_vim_interface_id = None
+            # resolve the internal interface ids to the VIM interface ids
+            for vim_interface, interface_data in interfaces.items():
+                if interface_data.get("interface_id") == ingress_interface_id:
+                    ingress_vim_interface_id = vim_interface
+                    break
+            if ingress_interface_id != egress_interface_id:
+                for vim_interface, interface_data in interfaces.items():
+                    if interface_data.get("interface_id") == egress_interface_id:
+                        egress_vim_interface_id = vim_interface
+                        break
+            else:
+                egress_vim_interface_id = ingress_vim_interface_id
+            if not ingress_vim_interface_id or not egress_vim_interface_id:
+                error_text = "Error creating Service Function Instance, Ingress: {}, Egress: {}".format(
+                    ingress_vim_interface_id, egress_vim_interface_id)
+                self.logger.error(error_text)
+                task["error_msg"] = error_text
+                task["status"] = "FAILED"
+                task["vim_id"] = None
+                return None
+            # At the moment, every port associated with the VM will be used both as ingress and egress ports.
+            # Bear in mind that different VIM connectors might support SFI differently. In the case of OpenStack,
+            # only the first ingress and first egress ports will be used to create the SFI (Port Pair).
+            ingress_port_id_list = [ingress_vim_interface_id]
+            egress_port_id_list = [egress_vim_interface_id]
+            name = "sfi-{}".format(task["item_id"][:8])
+            # By default no form of IETF SFC Encapsulation will be used
+            vim_sfi_id = self.vim.new_sfi(name, ingress_port_id_list, egress_port_id_list, sfc_encap=False)
+
+            task["extra"]["created"] = True
+            task["extra"]["vim_status"] = "ACTIVE"
+            task["error_msg"] = None
+            task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
+            task["vim_id"] = vim_sfi_id
+            instance_element_update = {"status": "ACTIVE", "vim_sfi_id": vim_sfi_id, "error_msg": None}
+            return instance_element_update
+
+        except (vimconn.vimconnException, VimThreadException) as e:
+            self.logger.error("Error creating Service Function Instance, task=%s: %s", task_id, str(e))
+            error_text = self._format_vim_error_msg(str(e))
+            task["error_msg"] = error_text
+            task["status"] = "FAILED"
+            task["vim_id"] = None
+            instance_element_update = {"status": "VIM_ERROR", "vim_sfi_id": None, "error_msg": error_text}
+            return instance_element_update
+
+    def del_sfi(self, task):
+        """Delete a Service Function Instance at the VIM.
+        Not-found at the VIM is treated as success (nothing left to delete).
+        :param task: task dictionary; "status"/"error_msg" are updated in place
+        :return: None always
+        """
+        sfi_vim_id = task["vim_id"]
+        try:
+            self.vim.delete_sfi(sfi_vim_id)
+            task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
+            task["error_msg"] = None
+            return None
+
+        except vimconn.vimconnException as e:
+            task["error_msg"] = self._format_vim_error_msg(str(e))
+            if isinstance(e, vimconn.vimconnNotFoundException):
+                # If not found mark as Done and fill error_msg
+                task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
+                return None
+            task["status"] = "FAILED"
+            return None
+
+    def new_sf(self, task):
+        """Create a Service Function at the VIM from the SFIs created by the depended-on tasks.
+        :param task: task dictionary; "status"/"vim_id"/"error_msg"/"extra" are updated in place
+        :return: dict with the fields to update at the instance database entry
+        """
+        vim_sf_id = None
+        try:
+            task_id = task["instance_action_id"] + "." + str(task["task_index"])
+            error_text = ""
+            depending_tasks = ["TASK-" + str(dep_id) for dep_id in task["extra"]["depends_on"]]
+            # sfis = next(iter(task.get("depends").values())).get("extra").get("params")[5]
+            sfis = [task.get("depends").get(dep_task) for dep_task in depending_tasks]
+            sfi_id_list = []
+            for sfi in sfis:
+                sfi_id_list.append(sfi.get("vim_id"))
+            name = "sf-{}".format(task["item_id"][:8])
+            # By default no form of IETF SFC Encapsulation will be used
+            vim_sf_id = self.vim.new_sf(name, sfi_id_list, sfc_encap=False)
+
+            task["extra"]["created"] = True
+            task["extra"]["vim_status"] = "ACTIVE"
+            task["error_msg"] = None
+            task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
+            task["vim_id"] = vim_sf_id
+            instance_element_update = {"status": "ACTIVE", "vim_sf_id": vim_sf_id, "error_msg": None}
+            return instance_element_update
+
+        except (vimconn.vimconnException, VimThreadException) as e:
+            self.logger.error("Error creating Service Function, task=%s: %s", task_id, str(e))
+            error_text = self._format_vim_error_msg(str(e))
+            task["error_msg"] = error_text
+            task["status"] = "FAILED"
+            task["vim_id"] = None
+            instance_element_update = {"status": "VIM_ERROR", "vim_sf_id": None, "error_msg": error_text}
+            return instance_element_update
+
+    def del_sf(self, task):
+        """Delete a Service Function at the VIM.
+        Not-found at the VIM is treated as success (nothing left to delete).
+        :param task: task dictionary; "status"/"error_msg" are updated in place
+        :return: None always
+        """
+        sf_vim_id = task["vim_id"]
+        try:
+            self.vim.delete_sf(sf_vim_id)
+            task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
+            task["error_msg"] = None
+            return None
+
+        except vimconn.vimconnException as e:
+            task["error_msg"] = self._format_vim_error_msg(str(e))
+            if isinstance(e, vimconn.vimconnNotFoundException):
+                # If not found mark as Done and fill error_msg
+                task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
+                return None
+            task["status"] = "FAILED"
+            return None
+
+    def new_classification(self, task):
+        vim_classification_id = None
+        try:
+            params = task["params"]
+            task_id = task["instance_action_id"] + "." + str(task["task_index"])
+            dep_id = "TASK-" + str(task["extra"]["depends_on"][0])
+            error_text = ""
+            interfaces = task.get("depends").get(dep_id).get("extra").get("interfaces").keys()
+            # Bear in mind that different VIM connectors might support Classifications differently.
+            # In the case of OpenStack, only the first VNF attached to the classifier will be used
+            # to create the Classification(s) (the "logical source port" of the "Flow Classifier").
+            # Since the VNFFG classifier match lacks the ethertype, classification defaults to
+            # using the IPv4 flow classifier.
+            name = "c-{}".format(task["item_id"][:8])
+            # if not CIDR is given for the IP addresses, add /32:
+            ip_proto = int(params.get("ip_proto"))
+            source_ip = params.get("source_ip")
+            destination_ip = params.get("destination_ip")
+            source_port = params.get("source_port")
+            destination_port = params.get("destination_port")
+            definition = {"logical_source_port": interfaces[0]}
+            if ip_proto:
+                if ip_proto == 1:
+                    ip_proto = 'icmp'
+                elif ip_proto == 6:
+                    ip_proto = 'tcp'
+                elif ip_proto == 17:
+                    ip_proto = 'udp'
+                definition["protocol"] = ip_proto
+            if source_ip:
+                if '/' not in source_ip:
+                    source_ip += '/32'
+                definition["source_ip_prefix"] = source_ip
+            if source_port:
+                definition["source_port_range_min"] = source_port
+                definition["source_port_range_max"] = source_port
+            if destination_port:
+                definition["destination_port_range_min"] = destination_port
+                definition["destination_port_range_max"] = destination_port
+            if destination_ip:
+                if '/' not in destination_ip:
+                    destination_ip += '/32'
+                definition["destination_ip_prefix"] = destination_ip
+
+            vim_classification_id = self.vim.new_classification(
+                name, 'legacy_flow_classifier', definition)
+
+            task["extra"]["created"] = True
+            task["extra"]["vim_status"] = "ACTIVE"
+            task["error_msg"] = None
+            task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
+            task["vim_id"] = vim_classification_id
+            instance_element_update = {"status": "ACTIVE", "vim_classification_id": vim_classification_id,
+                                       "error_msg": None}
+            return instance_element_update
+
+        except (vimconn.vimconnException, VimThreadException) as e:
+            self.logger.error("Error creating Classification, task=%s: %s", task_id, str(e))
+            error_text = self._format_vim_error_msg(str(e))
+            task["error_msg"] = error_text
+            task["status"] = "FAILED"
+            task["vim_id"] = None
+            instance_element_update = {"status": "VIM_ERROR", "vim_classification_id": None, "error_msg": error_text}
+            return instance_element_update
+
+    def del_classification(self, task):
+        """Delete a flow Classification at the VIM.
+        Not-found at the VIM is treated as success (nothing left to delete).
+        :param task: task dictionary; "status"/"error_msg" are updated in place
+        :return: None always
+        """
+        classification_vim_id = task["vim_id"]
+        try:
+            self.vim.delete_classification(classification_vim_id)
+            task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
+            task["error_msg"] = None
+            return None
+
+        except vimconn.vimconnException as e:
+            task["error_msg"] = self._format_vim_error_msg(str(e))
+            if isinstance(e, vimconn.vimconnNotFoundException):
+                # If not found mark as Done and fill error_msg
+                task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
+                return None
+            task["status"] = "FAILED"
+            return None
+
+    def new_sfp(self, task):
+        """Create a Service Function Path at the VIM, chaining the SFs and Classifications created
+        by the depended-on tasks (split by their "item" type).
+        :param task: task dictionary; "status"/"vim_id"/"error_msg"/"extra" are updated in place
+        :return: dict with the fields to update at the instance database entry
+        """
+        vim_sfp_id = None
+        try:
+            task_id = task["instance_action_id"] + "." + str(task["task_index"])
+            depending_tasks = [task.get("depends").get("TASK-" + str(tsk_id)) for tsk_id in
+                               task.get("extra").get("depends_on")]
+            error_text = ""
+            sf_id_list = []
+            classification_id_list = []
+            for dep in depending_tasks:
+                vim_id = dep.get("vim_id")
+                resource = dep.get("item")
+                if resource == "instance_sfs":
+                    sf_id_list.append(vim_id)
+                elif resource == "instance_classifications":
+                    classification_id_list.append(vim_id)
+
+            name = "sfp-{}".format(task["item_id"][:8])
+            # By default no form of IETF SFC Encapsulation will be used
+            vim_sfp_id = self.vim.new_sfp(name, classification_id_list, sf_id_list, sfc_encap=False)
+
+            task["extra"]["created"] = True
+            task["extra"]["vim_status"] = "ACTIVE"
+            task["error_msg"] = None
+            task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
+            task["vim_id"] = vim_sfp_id
+            instance_element_update = {"status": "ACTIVE", "vim_sfp_id": vim_sfp_id, "error_msg": None}
+            return instance_element_update
+
+        except (vimconn.vimconnException, VimThreadException) as e:
+            # NOTE(review): log text says "Service Function" — presumably copied from new_sf; confirm
+            self.logger.error("Error creating Service Function, task=%s: %s", task_id, str(e))
+            error_text = self._format_vim_error_msg(str(e))
+            task["error_msg"] = error_text
+            task["status"] = "FAILED"
+            task["vim_id"] = None
+            instance_element_update = {"status": "VIM_ERROR", "vim_sfp_id": None, "error_msg": error_text}
+            return instance_element_update
+
+    def del_sfp(self, task):
+        """Delete a Service Function Path at the VIM.
+        Not-found at the VIM is treated as success (nothing left to delete).
+        :param task: task dictionary; "status"/"error_msg" are updated in place
+        :return: None always
+        """
+        sfp_vim_id = task["vim_id"]
+        try:
+            self.vim.delete_sfp(sfp_vim_id)
+            task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
+            task["error_msg"] = None
+            return None
+
+        except vimconn.vimconnException as e:
+            task["error_msg"] = self._format_vim_error_msg(str(e))
+            if isinstance(e, vimconn.vimconnNotFoundException):
+                # If not found mark as Done and fill error_msg
+                task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
+                return None
+            task["status"] = "FAILED"
+            return None
diff --git a/RO/osm_ro/vimconn.py b/RO/osm_ro/vimconn.py
new file mode 100644 (file)
index 0000000..6e20654
--- /dev/null
@@ -0,0 +1,942 @@
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+"""
+vimconn implement an Abstract class for the vim connector plugins
+ with the definition of the method to be implemented.
+"""
+
+import logging
+import paramiko
+import socket
+from io import StringIO
+import yaml
+import sys
+from email.mime.multipart import MIMEMultipart
+from email.mime.text import MIMEText
+from osm_ro.utils import deprecated
+
+__author__ = "Alfonso Tierno, Igor D.C."
+__date__  = "$14-aug-2017 23:59:59$"
+
+#Error variables 
+HTTP_Bad_Request = 400
+HTTP_Unauthorized = 401 
+HTTP_Not_Found = 404 
+HTTP_Method_Not_Allowed = 405 
+HTTP_Request_Timeout = 408
+HTTP_Conflict = 409
+HTTP_Not_Implemented = 501
+HTTP_Service_Unavailable = 503 
+HTTP_Internal_Server_Error = 500 
+
+
+class vimconnException(Exception):
+    """Common and base class Exception for all vimconnector exceptions"""
+    def __init__(self, message, http_code=HTTP_Bad_Request):
+        Exception.__init__(self, message)
+        # HTTP-like status code, used by callers to map the error to a REST response
+        self.http_code = http_code
+
+
+class vimconnConnectionException(vimconnException):
+    """Connectivity error with the VIM (defaults to HTTP 503)"""
+    def __init__(self, message, http_code=HTTP_Service_Unavailable):
+        vimconnException.__init__(self, message, http_code)
+
+
+class vimconnUnexpectedResponse(vimconnException):
+    """Got a wrong/unexpected response from the VIM (defaults to HTTP 503)"""
+    def __init__(self, message, http_code=HTTP_Service_Unavailable):
+        vimconnException.__init__(self, message, http_code)
+
+
+class vimconnAuthException(vimconnException):
+    """Invalid credentials or authorization to perform this action over the VIM (defaults to HTTP 401)"""
+    def __init__(self, message, http_code=HTTP_Unauthorized):
+        vimconnException.__init__(self, message, http_code)
+
+
+class vimconnNotFoundException(vimconnException):
+    """The item is not found at VIM (defaults to HTTP 404)"""
+    def __init__(self, message, http_code=HTTP_Not_Found):
+        vimconnException.__init__(self, message, http_code)
+
+
+class vimconnConflictException(vimconnException):
+    """There is a conflict, e.g. more items found than one (defaults to HTTP 409)"""
+    def __init__(self, message, http_code=HTTP_Conflict):
+        vimconnException.__init__(self, message, http_code)
+
+
+class vimconnNotSupportedException(vimconnException):
+    """The request is not supported by the connector (defaults to HTTP 503)"""
+    def __init__(self, message, http_code=HTTP_Service_Unavailable):
+        vimconnException.__init__(self, message, http_code)
+
+
+class vimconnNotImplemented(vimconnException):
+    """The method is not implemented by the connector (defaults to HTTP 501)"""
+    def __init__(self, message, http_code=HTTP_Not_Implemented):
+        vimconnException.__init__(self, message, http_code)
+
+
+class vimconnector():
+    """Abstract base class for all the VIM connector plugins
+    These plugins must implement a vimconnector class derived from this 
+    and all these privated methods
+    """ 
+    def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None, log_level=None,
+                 config={}, persitent_info={}):
+        """
+        Constructor of VIM. Raise an exception is some needed parameter is missing, but it must not do any connectivity
+            checking against the VIM
+        :param uuid: internal id of this VIM
+        :param name: name assigned to this VIM, can be used for logging
+        :param tenant_id: 'tenant_id': (only one of them is mandatory) VIM tenant to be used
+        :param tenant_name: 'tenant_name': (only one of them is mandatory) VIM tenant to be used
+        :param url: url used for normal operations
+        :param url_admin: (optional), url used for administrative tasks
+        :param user: user to access
+        :param passwd: password
+        :param log_level: provided if it should use a different log_level than the general one
+        :param config: dictionary with extra VIM information. This contains a consolidate version of VIM config
+                    at VIM_ACCOUNT (attach)
+        :param persitent_info: dict where the class can store information that will be available among class
+                    destroy/creation cycles. This info is unique per VIM/credential. At first call it will contain an
+                    empty dict. Useful to store login/tokens information for speed up communication
+
+        """
+        self.id = uuid
+        self.name = name
+        self.url = url
+        self.url_admin = url_admin
+        self.tenant_id = tenant_id
+        self.tenant_name = tenant_name
+        self.user = user
+        self.passwd = passwd
+        self.config = config or {}
+        self.availability_zone = None
+        self.logger = logging.getLogger('openmano.vim')
+        if log_level:
+            self.logger.setLevel(getattr(logging, log_level))
+        if not self.url_admin:   # try to use normal url
+            self.url_admin = self.url
+    
+    def __getitem__(self, index):
+        if index == 'tenant_id':
+            return self.tenant_id
+        if index == 'tenant_name':
+            return self.tenant_name
+        elif index == 'id':
+            return self.id
+        elif index == 'name':
+            return self.name
+        elif index == 'user':
+            return self.user
+        elif index == 'passwd':
+            return self.passwd
+        elif index == 'url':
+            return self.url
+        elif index == 'url_admin':
+            return self.url_admin
+        elif index == "config":
+            return self.config
+        else:
+            raise KeyError("Invalid key '{}'".format(index))
+        
+    def __setitem__(self, index, value):
+        if index == 'tenant_id':
+            self.tenant_id = value
+        if index == 'tenant_name':
+            self.tenant_name = value
+        elif index == 'id':
+            self.id = value
+        elif index == 'name':
+            self.name = value
+        elif index == 'user':
+            self.user = value
+        elif index == 'passwd':
+            self.passwd = value
+        elif index == 'url':
+            self.url = value
+        elif index == 'url_admin':
+            self.url_admin = value
+        else:
+            raise KeyError("Invalid key '{}'".format(index))
+
+    @staticmethod
+    def _create_mimemultipart(content_list):
+        """Creates a MIMEmultipart text combining the content_list
+        :param content_list: list of text scripts to be combined
+        :return: str of the created MIMEmultipart. If the list is empty returns None, if the list contains only one
+        element MIMEmultipart is not created and this content is returned
+        """
+        if not content_list:
+            return None
+        elif len(content_list) == 1:
+            return content_list[0]
+        combined_message = MIMEMultipart()
+        for content in content_list:
+            if content.startswith('#include'):
+                mime_format = 'text/x-include-url'
+            elif content.startswith('#include-once'):
+                mime_format = 'text/x-include-once-url'
+            elif content.startswith('#!'):
+                mime_format = 'text/x-shellscript'
+            elif content.startswith('#cloud-config'):
+                mime_format = 'text/cloud-config'
+            elif content.startswith('#cloud-config-archive'):
+                mime_format = 'text/cloud-config-archive'
+            elif content.startswith('#upstart-job'):
+                mime_format = 'text/upstart-job'
+            elif content.startswith('#part-handler'):
+                mime_format = 'text/part-handler'
+            elif content.startswith('#cloud-boothook'):
+                mime_format = 'text/cloud-boothook'
+            else:  # by default
+                mime_format = 'text/x-shellscript'
+            sub_message = MIMEText(content, mime_format, sys.getdefaultencoding())
+            combined_message.attach(sub_message)
+        return combined_message.as_string()
+
+    def _create_user_data(self, cloud_config):
+        """
+        Creates the cloud-init user-data (and the config-drive flag) from the cloud_config info
+        :param cloud_config: dictionary with
+            'key-pairs': (optional) list of strings with the public key to be inserted to the default user
+            'users': (optional) list of users to be inserted, each item is a dict with:
+                'name': (mandatory) user name,
+                'key-pairs': (optional) list of strings with the public key to be inserted to the user
+            'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
+                or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
+            'config-files': (optional). List of files to be transferred. Each item is a dict with:
+                'dest': (mandatory) string with the destination absolute path
+                'encoding': (optional, by default text). Can be one of:
+                    'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
+                'content' (mandatory): string with the content of the file
+                'permissions': (optional) string with file permissions, typically octal notation '0644'
+                'owner': (optional) file owner, string with the format 'owner:group'
+            'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
+        :return: config_drive, userdata. The first is a boolean or None, the second a string or None
+        """
+        config_drive = None
+        userdata = None
+        userdata_list = []
+        if isinstance(cloud_config, dict):
+            if cloud_config.get("user-data"):
+                if isinstance(cloud_config["user-data"], str):
+                    userdata_list.append(cloud_config["user-data"])
+                else:
+                    # assumed to be a list of scripts, appended as separate MIME parts
+                    for u in cloud_config["user-data"]:
+                        userdata_list.append(u)
+            if cloud_config.get("boot-data-drive") is not None:
+                config_drive = cloud_config["boot-data-drive"]
+            if cloud_config.get("config-files") or cloud_config.get("users") or cloud_config.get("key-pairs"):
+                # build a '#cloud-config' YAML document for keys, users and files
+                userdata_dict = {}
+                # default user
+                if cloud_config.get("key-pairs"):
+                    userdata_dict["ssh-authorized-keys"] = cloud_config["key-pairs"]
+                    userdata_dict["users"] = [{"default": None, "ssh-authorized-keys": cloud_config["key-pairs"]}]
+                if cloud_config.get("users"):
+                    if "users" not in userdata_dict:
+                        userdata_dict["users"] = ["default"]
+                    for user in cloud_config["users"]:
+                        user_info = {
+                            "name": user["name"],
+                            "sudo": "ALL = (ALL)NOPASSWD:ALL"
+                        }
+                        if "user-info" in user:
+                            user_info["gecos"] = user["user-info"]
+                        if user.get("key-pairs"):
+                            user_info["ssh-authorized-keys"] = user["key-pairs"]
+                        userdata_dict["users"].append(user_info)
+
+                if cloud_config.get("config-files"):
+                    userdata_dict["write_files"] = []
+                    for file in cloud_config["config-files"]:
+                        file_info = {
+                            "path": file["dest"],
+                            "content": file["content"]
+                        }
+                        if file.get("encoding"):
+                            file_info["encoding"] = file["encoding"]
+                        if file.get("permissions"):
+                            file_info["permissions"] = file["permissions"]
+                        if file.get("owner"):
+                            file_info["owner"] = file["owner"]
+                        userdata_dict["write_files"].append(file_info)
+                userdata_list.append("#cloud-config\n" + yaml.safe_dump(userdata_dict, indent=4,
+                                                                        default_flow_style=False))
+            # combine all parts into a single MIME multipart document (or pass-through for one part)
+            userdata = self._create_mimemultipart(userdata_list)
+            self.logger.debug("userdata: %s", userdata)
+        elif isinstance(cloud_config, str):
+            userdata = cloud_config
+        return config_drive, userdata
+
+    def check_vim_connectivity(self):
+        """Checks VIM can be reached and user credentials are ok.
+        Returns None if success or raises vimconnConnectionException, vimconnAuthException, ...
+        (plugins should override this method with a real check)
+        """
+        # by default no checking until each connector implements it
+        return None
+
+    def new_tenant(self, tenant_name, tenant_description):
+        """Adds a new tenant to VIM with this name and description, this is done using admin_url if provided
+        "tenant_name": string max length 64
+        "tenant_description": string max length 256
+        returns the tenant identifier or raise exception
+        """
+        # must be overridden by each VIM connector plugin
+        raise vimconnNotImplemented("Should have implemented this")
+
+    def delete_tenant(self, tenant_id):
+        """Delete a tenant from VIM
+        tenant_id: returned VIM tenant_id on "new_tenant"
+        Returns None on success. Raises an exception on failure. If tenant is not found raises vimconnNotFoundException
+        """
+        # must be overridden by each VIM connector plugin
+        raise vimconnNotImplemented("Should have implemented this")
+
+    def get_tenant_list(self, filter_dict={}):
+        """Obtain tenants of VIM
+        filter_dict dictionary that can contain the following keys:
+            name: filter by tenant name
+            id: filter by tenant uuid/id
+            <other VIM specific>
+        Returns the tenant list of dictionaries, and empty list if no tenant match all the filters:
+            [{'name':'<name>, 'id':'<id>, ...}, ...]
+        """
+        # must be overridden by each VIM connector plugin
+        raise vimconnNotImplemented("Should have implemented this")
+
+    def new_network(self, net_name, net_type, ip_profile=None, shared=False, vlan=None):
+        """Adds a tenant network to VIM
+        Params:
+            'net_name': name of the network
+            'net_type': one of:
+                'bridge': overlay isolated network
+                'data':   underlay E-LAN network for Passthrough and SRIOV interfaces
+                'ptp':    underlay E-LINE network for Passthrough and SRIOV interfaces.
+            'ip_profile': is a dict containing the IP parameters of the network
+                'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
+                'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
+                'gateway_address': (Optional) ip_schema, that is X.X.X.X
+                'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
+                'dhcp_enabled': True or False
+                'dhcp_start_address': ip_schema, first IP to grant
+                'dhcp_count': number of IPs to grant.
+            'shared': if this network can be seen/use by other tenants/organization
+            'vlan': in case of a data or ptp net_type, the intended vlan tag to be used for the network
+        Returns a tuple with the network identifier and created_items, or raises an exception on error
+            created_items can be None or a dictionary where this method can include key-values that will be passed to
+            the method delete_network. Can be used to store created segments, created l2gw connections, etc.
+            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+            as not present.
+        """
+        # must be overridden by each VIM connector plugin
+        raise vimconnNotImplemented("Should have implemented this")
+
+    def get_network_list(self, filter_dict={}):
+        """Obtain tenant networks of VIM
+        Params:
+            'filter_dict' (optional) contains entries to return only networks that matches ALL entries:
+                name: string  => returns only networks with this name
+                id:   string  => returns networks with this VIM id, this imply returns one network at most
+                shared: boolean >= returns only networks that are (or are not) shared
+                tenant_id: string => returns only networks that belong to this tenant/project
+                ,#(not used yet) admin_state_up: boolean => returns only networks that are (or are not) in admin state active
+                #(not used yet) status: 'ACTIVE','ERROR',... => filter networks that are on this status
+        Returns the network list of dictionaries. each dictionary contains:
+            'id': (mandatory) VIM network id
+            'name': (mandatory) VIM network name
+            'status': (mandatory) can be 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
+            'network_type': (optional) can be 'vxlan', 'vlan' or 'flat'
+            'segmentation_id': (optional) in case network_type is vlan or vxlan this field contains the segmentation id
+            'error_msg': (optional) text that explains the ERROR status
+            other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
+        List can be empty if no network map the filter_dict. Raise an exception only upon VIM connectivity,
+            authorization, or some other unspecific error
+        """
+        # must be overridden by each VIM connector plugin
+        raise vimconnNotImplemented("Should have implemented this")
+
+    def get_network(self, net_id):
+        """Obtain network details from the 'net_id' VIM network
+        Return a dict that contains:
+            'id': (mandatory) VIM network id, that is, net_id
+            'name': (mandatory) VIM network name
+            'status': (mandatory) can be 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
+            'error_msg': (optional) text that explains the ERROR status
+            other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
+        Raises an exception upon error or when network is not found
+        """
+        raise vimconnNotImplemented("Should have implemented this")
+
+    def delete_network(self, net_id, created_items=None):
+        """
+        Removes a tenant network from VIM and its associated elements
+        :param net_id: VIM identifier of the network, provided by method new_network
+        :param created_items: dictionary with extra items to be deleted. provided by method new_network
+        Returns the network identifier or raises an exception upon error or when network is not found
+        """
+        raise vimconnNotImplemented("Should have implemented this")
+
+    def refresh_nets_status(self, net_list):
+        """Get the status of the networks
+        Params:
+            'net_list': a list with the VIM network id to be get the status
+        Returns a dictionary with:
+            'net_id':         #VIM id of this network
+                status:     #Mandatory. Text with one of:
+                    #  DELETED (not found at vim)
+                    #  VIM_ERROR (Cannot connect to VIM, authentication problems, VIM response error, ...)
+                    #  OTHER (Vim reported other status not understood)
+                    #  ERROR (VIM indicates an ERROR status)
+                    #  ACTIVE, INACTIVE, DOWN (admin down),
+                    #  BUILD (on building process)
+                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
+                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
+            'net_id2': ...
+        """
+        raise vimconnNotImplemented("Should have implemented this")
+
+    def get_flavor(self, flavor_id):
+        """Obtain flavor details from the VIM
+        Returns the flavor dict details {'id':<>, 'name':<>, other vim specific }
+        Raises an exception upon error or if not found
+        """
+        raise vimconnNotImplemented("Should have implemented this")
+
    def get_flavor_id_from_data(self, flavor_dict):
        """Obtain flavor id that match the flavor description
        Params:
            'flavor_dict': dictionary that contains:
                'disk': main hard disk in GB
                'ram': memory in MB
                'vcpus': number of virtual cpus
                #TODO: complete parameters for EPA
        Returns the flavor_id or raises a vimconnNotFoundException
        """
        raise vimconnNotImplemented("Should have implemented this")
+
    def new_flavor(self, flavor_data):
        """Adds a tenant flavor to VIM
            flavor_data contains a dictionary with information, keys:
                name: flavor name
                ram: memory (cloud type) in MBytes
                vcpus: cpus (cloud type)
                extended: EPA parameters
                  - numas: #items requested in same NUMA
                        memory: number of 1G huge pages memory
                        paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads
                        interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
                          - name: interface name
                            dedicated: yes|no|yes:sriov;  for PT, SRIOV or only one SRIOV for the physical NIC
                            bandwidth: X Gbps; requested guarantee bandwidth
                            vpci: requested virtual PCI address
                disk: disk size
                is_public:
                 #TODO to concrete
        Returns the flavor identifier"""
        raise vimconnNotImplemented("Should have implemented this")
+
+    def delete_flavor(self, flavor_id):
+        """Deletes a tenant flavor from VIM identify by its id
+        Returns the used id or raise an exception"""
+        raise vimconnNotImplemented("Should have implemented this")
+
+    def new_image(self, image_dict):
+        """ Adds a tenant image to VIM
+        Returns the image id or raises an exception if failed
+        """
+        raise vimconnNotImplemented("Should have implemented this")
+
+    def delete_image(self, image_id):
+        """Deletes a tenant image from VIM
+        Returns the image_id if image is deleted or raises an exception on error"""
+        raise vimconnNotImplemented("Should have implemented this")
+
+    def get_image_id_from_path(self, path):
+        """Get the image id from image path in the VIM database.
+           Returns the image_id or raises a vimconnNotFoundException
+        """
+        raise vimconnNotImplemented("Should have implemented this")
+        
+    def get_image_list(self, filter_dict={}):
+        """Obtain tenant images from VIM
+        Filter_dict can be:
+            name: image name
+            id: image uuid
+            checksum: image checksum
+            location: image path
+        Returns the image list of dictionaries:
+            [{<the fields at Filter_dict plus some VIM specific>}, ...]
+            List can be empty
+        """
+        raise vimconnNotImplemented( "Should have implemented this" )
+
    def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None, disk_list=None,
        availability_zone_index=None, availability_zone_list=None):
        """Adds a VM instance to VIM
        Params:
            'start': (boolean) indicates if VM must start or created in pause mode.
            'image_id','flavor_id': image and flavor VIM id to use for the VM
            'net_list': list of interfaces, each one is a dictionary with:
                'name': (optional) name for the interface.
                'net_id': VIM network id where this interface must be connect to. Mandatory for type==virtual
                'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities
                'model': (optional and only have sense for type==virtual) interface model: virtio, e1000, ...
                'mac_address': (optional) mac address to assign to this interface
                'ip_address': (optional) IP address to assign to this interface
                #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type if VF and net_id is not provided,
                    the VLAN tag to be used. In case net_id is provided, the internal network vlan is used for tagging VF
                'type': (mandatory) can be one of:
                    'virtual', in this case always connected to a network of type 'net_type=bridge'
                     'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp network or it
                           can be created unconnected
                     'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
                     'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
                            are allocated on the same physical NIC
                'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
                'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing
                                or True, it must apply the default VIM behaviour
                After execution the method will add the key:
                'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this
                        interface. 'net_list' is modified
            'cloud_config': (optional) dictionary with:
                'key-pairs': (optional) list of strings with the public key to be inserted to the default user
                'users': (optional) list of users to be inserted, each item is a dict with:
                    'name': (mandatory) user name,
                    'key-pairs': (optional) list of strings with the public key to be inserted to the user
                'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
                    or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
                'config-files': (optional). List of files to be transferred. Each item is a dict with:
                    'dest': (mandatory) string with the destination absolute path
                    'encoding': (optional, by default text). Can be one of:
                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
                    'content' (mandatory): string with the content of the file
                    'permissions': (optional) string with file permissions, typically octal notation '0644'
                    'owner': (optional) file owner, string with the format 'owner:group'
                'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
            'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
                'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
                'size': (mandatory) string with the size of the disk in GB
            availability_zone_index: Index of availability_zone_list to use for this VM. None if not AV required
            availability_zone_list: list of availability zones given by user in the VNFD descriptor.  Ignore if
                availability_zone_index is None
        Returns a tuple with the instance identifier and created_items or raises an exception on error
            created_items can be None or a dictionary where this method can include key-values that will be passed to
            the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
            as not present.
        """
        raise vimconnNotImplemented( "Should have implemented this" )
+        
+    def get_vminstance(self,vm_id):
+        """Returns the VM instance information from VIM"""
+        raise vimconnNotImplemented( "Should have implemented this" )
+        
+    def delete_vminstance(self, vm_id, created_items=None):
+        """
+        Removes a VM instance from VIM and its associated elements
+        :param vm_id: VIM identifier of the VM, provided by method new_vminstance
+        :param created_items: dictionary with extra items to be deleted. provided by method new_vminstance and/or method
+            action_vminstance
+        :return: None or the same vm_id. Raises an exception on fail
+        """
+        raise vimconnNotImplemented( "Should have implemented this" )
+
    def refresh_vms_status(self, vm_list):
        """Get the status of the virtual machines and their interfaces/ports
           Params: the list of VM identifiers
           Returns a dictionary with:
                vm_id:          #VIM id of this Virtual Machine
                    status:     #Mandatory. Text with one of:
                                #  DELETED (not found at vim)
                                #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                                #  OTHER (Vim reported other status not understood)
                                #  ERROR (VIM indicates an ERROR status)
                                #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
                                #  BUILD (on building process), ERROR
                                #  ACTIVE:NoMgmtIP (Active but no interface has an IP address)
                                #
                    error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
                    vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
                    interfaces: list with interface info. Each item a dictionary with:
                        vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
                        mac_address:      #Text format XX:XX:XX:XX:XX:XX
                        vim_net_id:       #network id where this interface is connected, if provided at creation
                        vim_interface_id: #interface/port VIM id
                        ip_address:       #null, or text with IPv4, IPv6 address
                        compute_node:     #identification of compute node where PF,VF interface is allocated
                        pci:              #PCI address of the NIC that hosts the PF,VF
                        vlan:             #physical VLAN used for VF
        """
        raise vimconnNotImplemented( "Should have implemented this" )
+    
    def action_vminstance(self, vm_id, action_dict, created_items={}):
        """
        Send an action over a VM instance. Returns created_items if the action was successfully sent to the VIM.
        :param vm_id: VIM identifier of the VM, provided by method new_vminstance
        :param action_dict: dictionary with the action to perform
        :param created_items: provided by method new_vminstance is a dictionary with key-values that will be passed to
            the method delete_vminstance. Can be used to store created ports, volumes, etc. Format is vimconnector
            dependent, but do not use nested dictionaries and a value of None should be the same as not present. This
            method can modify this value
        :return: None, or a console dict
        """
        raise vimconnNotImplemented( "Should have implemented this" )
+    
    def get_vminstance_console(self, vm_id, console_type="vnc"):
        """
        Get a console for the virtual machine
        Params:
            vm_id: uuid of the VM
            console_type, can be:
                "novnc", "xvpvnc" for VNC types,
                "rdp-html5" for RDP types, "spice-html5" for SPICE types
                NOTE(review): the signature default is "vnc", which is not in this list;
                the original text called "novnc" the default -- confirm the intended default
        Returns dict with the console parameters:
                protocol: ssh, ftp, http, https, ...
                server:   usually ip address
                port:     the http, ssh, ... port
                suffix:   extra text, e.g. the http path and query string
        """
        raise vimconnNotImplemented( "Should have implemented this" )
+
+    def inject_user_key(self, ip_addr=None, user=None, key=None, ro_key=None, password=None):
+        """
+        Inject a ssh public key in a VM
+        Params:
+            ip_addr: ip address of the VM
+            user: username (default-user) to enter in the VM
+            key: public key to be injected in the VM
+            ro_key: private key of the RO, used to enter in the VM if the password is not provided
+            password: password of the user to enter in the VM
+        The function doesn't return a value:
+        """
+        if not ip_addr or not user:
+            raise vimconnNotSupportedException("All parameters should be different from 'None'")
+        elif not ro_key and not password:
+            raise vimconnNotSupportedException("All parameters should be different from 'None'")
+        else:
+            commands = {'mkdir -p ~/.ssh/', 'echo "{}" >> ~/.ssh/authorized_keys'.format(key),
+                        'chmod 644 ~/.ssh/authorized_keys', 'chmod 700 ~/.ssh/'}
+            client = paramiko.SSHClient()
+            try:
+                if ro_key:
+                    pkey = paramiko.RSAKey.from_private_key(StringIO(ro_key))
+                else:
+                    pkey = None
+                client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+                client.connect(ip_addr, username=user, password=password, pkey=pkey, timeout=10)
+                for command in commands:
+                    (i, o, e) = client.exec_command(command, timeout=10)
+                    returncode = o.channel.recv_exit_status()
+                    output = o.read()
+                    outerror = e.read()
+                    if returncode != 0:
+                        text = "run_command='{}' Error='{}'".format(command, outerror)
+                        raise vimconnUnexpectedResponse("Cannot inject ssh key in VM: '{}'".format(text))
+                        return
+            except (socket.error, paramiko.AuthenticationException, paramiko.SSHException) as message:
+                raise vimconnUnexpectedResponse(
+                    "Cannot inject ssh key in VM: '{}' - {}".format(ip_addr, str(message)))
+                return
+
+# Optional methods
+
+    def new_tenant(self,tenant_name,tenant_description):
+        """Adds a new tenant to VIM with this name and description, this is done using admin_url if provided
+        "tenant_name": string max lenght 64
+        "tenant_description": string max length 256
+        returns the tenant identifier or raise exception
+        """
+        raise vimconnNotImplemented( "Should have implemented this" )
+
+    def delete_tenant(self,tenant_id,):
+        """Delete a tenant from VIM
+        tenant_id: returned VIM tenant_id on "new_tenant"
+        Returns None on success. Raises and exception of failure. If tenant is not found raises vimconnNotFoundException
+        """
+        raise vimconnNotImplemented( "Should have implemented this" )
+
+    def get_tenant_list(self, filter_dict=None):
+        """Obtain tenants of VIM
+        filter_dict dictionary that can contain the following keys:
+            name: filter by tenant name
+            id: filter by tenant uuid/id
+            <other VIM specific>
+        Returns the tenant list of dictionaries, and empty list if no tenant match all the filers:
+            [{'name':'<name>, 'id':'<id>, ...}, ...]
+        """
+        raise vimconnNotImplemented( "Should have implemented this" )
+
+    def new_classification(self, name, ctype, definition):
+        """Creates a traffic classification in the VIM
+        Params:
+            'name': name of this classification
+            'ctype': type of this classification
+            'definition': definition of this classification (type-dependent free-form text)
+        Returns the VIM's classification ID on success or raises an exception on failure
+        """
+        raise vimconnNotImplemented( "SFC support not implemented" )
+
+    def get_classification(self, classification_id):
+        """Obtain classification details of the VIM's classification with ID='classification_id'
+        Return a dict that contains:
+            'id': VIM's classification ID (same as classification_id)
+            'name': VIM's classification name
+            'type': type of this classification
+            'definition': definition of the classification
+            'status': 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
+            'error_msg': (optional) text that explains the ERROR status
+            other VIM specific fields: (optional) whenever possible
+        Raises an exception upon error or when classification is not found
+        """
+        raise vimconnNotImplemented( "SFC support not implemented" )
+
+    def get_classification_list(self, filter_dict={}):
+        """Obtain classifications from the VIM
+        Params:
+            'filter_dict' (optional): contains the entries to filter the classifications on and only return those that match ALL:
+                id:   string => returns classifications with this VIM's classification ID, which implies a return of one classification at most
+                name: string => returns only classifications with this name
+                type: string => returns classifications of this type
+                definition: string => returns classifications that have this definition
+                tenant_id: string => returns only classifications that belong to this tenant/project
+        Returns a list of classification dictionaries, each dictionary contains:
+            'id': (mandatory) VIM's classification ID
+            'name': (mandatory) VIM's classification name
+            'type': type of this classification
+            'definition': definition of the classification
+            other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
+        List can be empty if no classification matches the filter_dict. Raise an exception only upon VIM connectivity,
+            authorization, or some other unspecific error
+        """
+        raise vimconnNotImplemented( "SFC support not implemented" )
+
+    def delete_classification(self, classification_id):
+        """Deletes a classification from the VIM
+        Returns the classification ID (classification_id) or raises an exception upon error or when classification is not found
+        """
+        raise vimconnNotImplemented( "SFC support not implemented" )
+
+    def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
+        """Creates a service function instance in the VIM
+        Params:
+            'name': name of this service function instance
+            'ingress_ports': set of ingress ports (VIM's port IDs)
+            'egress_ports': set of egress ports (VIM's port IDs)
+            'sfc_encap': boolean stating whether this specific instance supports IETF SFC Encapsulation
+        Returns the VIM's service function instance ID on success or raises an exception on failure
+        """
+        raise vimconnNotImplemented( "SFC support not implemented" )
+
+    def get_sfi(self, sfi_id):
+        """Obtain service function instance details of the VIM's service function instance with ID='sfi_id'
+        Return a dict that contains:
+            'id': VIM's sfi ID (same as sfi_id)
+            'name': VIM's sfi name
+            'ingress_ports': set of ingress ports (VIM's port IDs)
+            'egress_ports': set of egress ports (VIM's port IDs)
+            'status': 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
+            'error_msg': (optional) text that explains the ERROR status
+            other VIM specific fields: (optional) whenever possible
+        Raises an exception upon error or when service function instance is not found
+        """
+        raise vimconnNotImplemented( "SFC support not implemented" )
+
+    def get_sfi_list(self, filter_dict={}):
+        """Obtain service function instances from the VIM
+        Params:
+            'filter_dict' (optional): contains the entries to filter the sfis on and only return those that match ALL:
+                id:   string  => returns sfis with this VIM's sfi ID, which implies a return of one sfi at most
+                name: string  => returns only service function instances with this name
+                tenant_id: string => returns only service function instances that belong to this tenant/project
+        Returns a list of service function instance dictionaries, each dictionary contains:
+            'id': (mandatory) VIM's sfi ID
+            'name': (mandatory) VIM's sfi name
+            'ingress_ports': set of ingress ports (VIM's port IDs)
+            'egress_ports': set of egress ports (VIM's port IDs)
+            other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
+        List can be empty if no sfi matches the filter_dict. Raise an exception only upon VIM connectivity,
+            authorization, or some other unspecific error
+        """
+        raise vimconnNotImplemented( "SFC support not implemented" )
+
+    def delete_sfi(self, sfi_id):
+        """Deletes a service function instance from the VIM
+        Returns the service function instance ID (sfi_id) or raises an exception upon error or when sfi is not found
+        """
+        raise vimconnNotImplemented( "SFC support not implemented" )
+
+    def new_sf(self, name, sfis, sfc_encap=True):
+        """Creates (an abstract) service function in the VIM
+        Params:
+            'name': name of this service function
+            'sfis': set of service function instances of this (abstract) service function
+            'sfc_encap': boolean stating whether this service function supports IETF SFC Encapsulation
+        Returns the VIM's service function ID on success or raises an exception on failure
+        """
+        raise vimconnNotImplemented( "SFC support not implemented" )
+
+    def get_sf(self, sf_id):
+        """Obtain service function details of the VIM's service function with ID='sf_id'
+        Return a dict that contains:
+            'id': VIM's sf ID (same as sf_id)
+            'name': VIM's sf name
+            'sfis': VIM's sf's set of VIM's service function instance IDs
+            'sfc_encap': boolean stating whether this service function supports IETF SFC Encapsulation
+            'status': 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
+            'error_msg': (optional) text that explains the ERROR status
+            other VIM specific fields: (optional) whenever possible
+        Raises an exception upon error or when sf is not found
+        """
+
+    def get_sf_list(self, filter_dict={}):
+        """Obtain service functions from the VIM
+        Params:
+            'filter_dict' (optional): contains the entries to filter the sfs on and only return those that match ALL:
+                id:   string  => returns sfs with this VIM's sf ID, which implies a return of one sf at most
+                name: string  => returns only service functions with this name
+                tenant_id: string => returns only service functions that belong to this tenant/project
+        Returns a list of service function dictionaries, each dictionary contains:
+            'id': (mandatory) VIM's sf ID
+            'name': (mandatory) VIM's sf name
+            'sfis': VIM's sf's set of VIM's service function instance IDs
+            'sfc_encap': boolean stating whether this service function supports IETF SFC Encapsulation
+            other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
+        List can be empty if no sf matches the filter_dict. Raise an exception only upon VIM connectivity,
+            authorization, or some other unspecific error
+        """
+        raise vimconnNotImplemented( "SFC support not implemented" )
+
+    def delete_sf(self, sf_id):
+        """Deletes (an abstract) service function from the VIM
+        Returns the service function ID (sf_id) or raises an exception upon error or when sf is not found
+        """
+        raise vimconnNotImplemented( "SFC support not implemented" )
+
+    def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
+        """Creates a service function path
+        Params:
+            'name': name of this service function path
+            'classifications': set of traffic classifications that should be matched on to get into this sfp
+            'sfs': list of every service function that constitutes this path , from first to last
+            'sfc_encap': whether this is an SFC-Encapsulated chain (i.e using NSH), True by default
+            'spi': (optional) the Service Function Path identifier (SPI: Service Path Identifier) for this path
+        Returns the VIM's sfp ID on success or raises an exception on failure
+        """
+        raise vimconnNotImplemented( "SFC support not implemented" )
+
+    def get_sfp(self, sfp_id):
+        """Obtain service function path details of the VIM's sfp with ID='sfp_id'
+        Return a dict that contains:
+            'id': VIM's sfp ID (same as sfp_id)
+            'name': VIM's sfp name
+            'classifications': VIM's sfp's list of VIM's classification IDs
+            'sfs': VIM's sfp's list of VIM's service function IDs
+            'status': 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
+            'error_msg': (optional) text that explains the ERROR status
+            other VIM specific fields: (optional) whenever possible
+        Raises an exception upon error or when sfp is not found
+        """
+        raise vimconnNotImplemented( "SFC support not implemented" )
+
+    def get_sfp_list(self, filter_dict={}):
+        """Obtain service function paths from VIM
+        Params:
+            'filter_dict' (optional): contains the entries to filter the sfps on, and only return those that match ALL:
+                id:   string  => returns sfps with this VIM's sfp ID , which implies a return of one sfp at most
+                name: string  => returns only sfps with this name
+                tenant_id: string => returns only sfps that belong to this tenant/project
+        Returns a list of service function path dictionaries, each dictionary contains:
+            'id': (mandatory) VIM's sfp ID
+            'name': (mandatory) VIM's sfp name
+            'classifications': VIM's sfp's list of VIM's classification IDs
+            'sfs': VIM's sfp's list of VIM's service function IDs
+            other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
+        List can be empty if no sfp matches the filter_dict. Raise an exception only upon VIM connectivity,
+            authorization, or some other unspecific error
+        """
+        raise vimconnNotImplemented( "SFC support not implemented" )
+
+    def delete_sfp(self, sfp_id):
+        """Deletes a service function path from the VIM
+        Returns the sfp ID (sfp_id) or raises an exception upon error or when sf is not found
+        """
+        raise vimconnNotImplemented( "SFC support not implemented" )
+
+# NOT USED METHODS in current version. Deprecated
+
+    @deprecated
+    def host_vim2gui(self, host, server_dict):
+        """Transform host dictionary from VIM format to GUI format,
+        and append to the server_dict
+        """
+        raise vimconnNotImplemented( "Should have implemented this" )
+
+    @deprecated
+    def get_hosts_info(self):
+        """Get the information of deployed hosts
+        Returns the hosts content"""
+        raise vimconnNotImplemented( "Should have implemented this" )
+
+    @deprecated
+    def get_hosts(self, vim_tenant):
+        """Get the hosts and deployed instances
+        Returns the hosts content"""
+        raise vimconnNotImplemented( "Should have implemented this" )
+
+    @deprecated
+    def get_processor_rankings(self):
+        """Get the processor rankings in the VIM database"""
+        raise vimconnNotImplemented( "Should have implemented this" )
+    
+    @deprecated
+    def new_host(self, host_data):
+        """Adds a new host to VIM"""
+        """Returns status code of the VIM response"""
+        raise vimconnNotImplemented( "Should have implemented this" )
+    
+    @deprecated
+    def new_external_port(self, port_data):
+        """Adds a external port to VIM"""
+        """Returns the port identifier"""
+        raise vimconnNotImplemented( "Should have implemented this" )
+        
+    @deprecated
+    def new_external_network(self,net_name,net_type):
+        """Adds a external network to VIM (shared)"""
+        """Returns the network identifier"""
+        raise vimconnNotImplemented( "Should have implemented this" )
+    @deprecated
+
+    @deprecated
+    def connect_port_network(self, port_id, network_id, admin=False):
+        """Connects a external port to a network"""
+        """Returns status code of the VIM response"""
+        raise vimconnNotImplemented( "Should have implemented this" )
+
+    @deprecated
+    def new_vminstancefromJSON(self, vm_data):
+        """Adds a VM instance to VIM"""
+        """Returns the instance identifier"""
+        raise vimconnNotImplemented( "Should have implemented this" )
+
diff --git a/RO/osm_ro/vmwarecli.py b/RO/osm_ro/vmwarecli.py
new file mode 100755 (executable)
index 0000000..b7b2742
--- /dev/null
@@ -0,0 +1,816 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+##
+# This file is standalone vmware vcloud director util
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: mbayramov@vmware.com
+##
+
+"""
+
Standalone application that leverages the openmano vmware connector to work with the vCloud director REST API.
+
+ - Provides capability to create and delete VDC for specific organization.
+ - Create, delete and manage network for specific VDC
+ - List deployed VM's , VAPPs, VDSs, Organization
+ - View detail information about VM / Vapp , Organization etc
+ - Operate with images upload / boot / power on etc
+
+ Usage example.
+
+ List organization created in vCloud director
+  vmwarecli.py -u admin -p qwerty123 -c 172.16.254.206 -U Administrator -P qwerty123 -o test -v TEF list org
+
+ List VDC for particular organization
+  vmwarecli.py -u admin -p qwerty123 -c 172.16.254.206 -U Administrator -P qwerty123 -o test -v TEF list vdc
+
+ Upload image
+  python vmwarerecli.py image upload /Users/spyroot/Developer/Openmano/Ro/vnfs/cirros/cirros.ovf
+
+ Boot Image
+    python vmwarerecli.py -u admin -p qwerty123 -c 172.16.254.206 -o test -v TEF image boot cirros cirros
+
+ View vApp
+    python vmwarerecli.py -u admin -p qwerty123 -c 172.16.254.206 -o test -v TEF view vapp 90bd2b4e-f782-46cf-b5e2-c3817dcf6633 -u
+
+ List VMS
+    python vmwarerecli.py -u admin -p qwerty123 -c 172.16.254.206 -o test -v TEF list vms
+
+ List VDC in OSM format
+  python vmwarerecli.py -u admin -p qwerty123 -c 172.16.254.206 -o test -v TEF list vdc -o
+
Mustafa Bayramov
+mbayramov@vmware.com
+"""
+import os
+import argparse
+import traceback
+import uuid
+
+from xml.etree import ElementTree as ET
+
+import sys
+from pyvcloud import Http
+
+import logging
+from osm_ro import vimconn
+import time
+import uuid
+import urllib3
+import requests
+
+from osm_ro.vimconn_vmware import vimconnector
# TODO py3 uncomment: from requests.packages.urllib3.exceptions import InsecureRequestWarning
+from prettytable import PrettyTable
+
# TODO py3 uncomment: requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
+
+__author__ = "Mustafa Bayramov"
+__date__ = "$16-Sep-2016 11:09:29$"
+
+
+# TODO move to main vim
+def delete_network_action(vca=None, network_uuid=None):
+    """
+    Method leverages vCloud director and query network based on network uuid
+
+    Args:
+        vca - is active VCA connection.
+        network_uuid - is a network uuid
+
+        Returns:
+            The return XML respond
+    """
+
+    if vca is None or network_uuid is None:
+        return None
+
+    url_list = [vca.host, '/api/admin/network/', network_uuid]
+    vm_list_rest_call = ''.join(url_list)
+
+    if not (not vca.vcloud_session or not vca.vcloud_session.organization):
+        response = Http.get(url=vm_list_rest_call,
+                            headers=vca.vcloud_session.get_vcloud_headers(),
+                            verify=vca.verify,
+                            logger=vca.logger)
+        if response.status_code == requests.codes.ok:
+            print(response.content)
+            return response.content
+
+    return None
+
+
def print_vapp(vapp_dict=None):
    """Print a vapp dictionary in tabular format.

    Args:
        vapp_dict: container vapp object, keyed by vm uuid.

    Returns:
        Nothing.
    """
    if vapp_dict is None:
        return

    vm_table = PrettyTable(['vm   uuid',
                            'vapp name',
                            'vapp uuid',
                            'network name',
                            'storage name',
                            'vcpu', 'memory', 'hw ver','deployed','status'])
    for vm_uuid, vapp in vapp_dict.items():
        row = [vm_uuid,
               vapp['containerName'],
               # last path segment of the container href minus its type prefix
               vapp['container'].split('/')[-1][5:],
               vapp['networkName'],
               vapp['storageProfileName'],
               vapp['numberOfCpus'],
               vapp['memoryMB'],
               vapp['pvdcHighestSupportedHardwareVersion'],
               vapp['isDeployed'],
               vapp['status']]
        vm_table.add_row(row)

    print(vm_table)
+
+
def print_org(org_dict=None):
    """Print organizations in tabular format.

    Args:
        org_dict: dictionary of organizations, keyed by org uuid.

    Returns:
        Nothing.
    """
    if org_dict is None:
        return

    org_table = PrettyTable(['org uuid', 'name'])
    for org_uuid, org_name in org_dict.items():
        org_table.add_row([org_uuid, org_name])

    print(org_table)
+
+
def print_vm_list(vm_dict=None):
    """Print a VM dictionary in tabular format.

    Args:
        vm_dict: dictionary of VMs, keyed by vm uuid.

    Returns:
        Nothing.
    """
    if vm_dict is None:
        return

    vm_table = PrettyTable(
        ['vm uuid', 'vm name', 'vapp uuid', 'vdc uuid', 'network name', 'is deployed', 'vcpu', 'memory', 'status'])

    try:
        for k, vm in vm_dict.items():
            vm_table.add_row([k,
                              vm['name'],
                              vm['container'].split('/')[-1][5:],
                              vm['vdc'].split('/')[-1],
                              vm['networkName'],
                              vm['isDeployed'],
                              vm['numberOfCpus'],
                              vm['memoryMB'],
                              vm['status']])
        print(vm_table)
    except KeyError as err:
        # py3 fix: exceptions have no .message attribute; log the instance instead
        logger.error("wrong key {}".format(err))
+
+
def print_vdc_list(org_dict=None):
    """Print the VDCs of an organization in tabular format.

    Args:
        org_dict: dictionary of organization data that may contain a 'vdcs' key.

    Returns:
        Nothing.
    """
    if org_dict is None:
        return
    try:
        vdcs_dict = org_dict.get('vdcs', {})
        vdc_table = PrettyTable(['vdc uuid', 'vdc name'])
        for k in vdcs_dict:
            vdc_table.add_row([k, vdcs_dict[k]])

        print(vdc_table)
    except KeyError as err:
        # py3 fix: exceptions have no .message; also 'logger.logger.debug' was a typo
        logger.error("wrong key {}".format(err))
        logger.debug(traceback.format_exc())
+
+
def print_network_list(org_dict=None):
    """Print the network list of an organization.

    Args:
        org_dict: dictionary of organization data that may contain a 'networks'
                  key with all networks for a specific VDC.

    Returns:
        Nothing.
    """
    if org_dict is None:
        return
    try:
        network_dict = org_dict.get('networks', {})
        network_table = PrettyTable(['network uuid', 'network name'])
        for k in network_dict:
            network_table.add_row([k, network_dict[k]])

        print(network_table)

    except KeyError as err:
        # py3 fix: exceptions have no .message; also 'logger.logger.debug' was a typo
        logger.error("wrong key {}".format(err))
        logger.debug(traceback.format_exc())
+
+
def print_org_details(org_dict=None):
    """Print full organization details: vdcs, networks and catalogs.

    Args:
        org_dict: dictionary of organization data, keyed by org uuid.

    Returns:
        Nothing.
    """
    if org_dict is None:
        return
    try:
        print_vdc_list(org_dict=org_dict)
        print_network_list(org_dict=org_dict)

        catalogs_dict = org_dict.get('catalogs', {})
        catalog_table = PrettyTable(['catalog uuid', 'catalog name'])
        for k in catalogs_dict:
            catalog_table.add_row([k, catalogs_dict[k]])

        print(catalog_table)

    except KeyError as err:
        # py3 fix: exceptions have no .message; also 'logger.logger.debug' was a typo
        logger.error("wrong key {}".format(err))
        logger.debug(traceback.format_exc())
+
+
def delete_actions(vim=None, action=None, namespace=None):
    """Dispatch 'delete' sub-commands (currently only 'network')."""
    if 'network' in (action, namespace.action):
        logger.debug("Requesting delete for network {}".format(namespace.network_name))
        network_uuid = namespace.network_name
        # name-based request: resolve the network name to its uuid
        # TODO optimize it or move to external function
        if not namespace.uuid:
            for org in vim.get_org_list():
                org_networks = vim.get_org(org)['networks']
                for net_uuid in org_networks:
                    if org_networks[net_uuid] == namespace.network_name:
                        network_uuid = net_uuid

        vim.delete_network_action(network_uuid=network_uuid)
+
+
def list_actions(vim=None, action=None, namespace=None):
    """List objects from the VDC (vms / vapps / networks / vdc / org).

    Args:
        vim - vcloud director vim connector.
        action - action to list (vdc / org etc.).
        namespace - must contain VDC / Org information.

    Returns:
        Nothing (None for an unknown action).
    """
    org_id = None
    myorgs = vim.get_org_list()
    for org in myorgs:
        if myorgs[org] == namespace.vcdorg:
            org_id = org
            break  # bug fix: break was unconditional, so only the first org was ever checked
    else:
        # for/else: loop finished without a break -> no org matched namespace.vcdorg
        print(" Invalid organization.")
        return

    if action == 'vms' or namespace.action == 'vms':
        vm_dict = vim.get_vm_list(vdc_name=namespace.vcdvdc)
        print_vm_list(vm_dict=vm_dict)
    elif action == 'vapps' or namespace.action == 'vapps':
        vapp_dict = vim.get_vapp_list(vdc_name=namespace.vcdvdc)
        print_vapp(vapp_dict=vapp_dict)
    elif action == 'networks' or namespace.action == 'networks':
        if namespace.osm:
            osm_print(vim.get_network_list(filter_dict={}))
        else:
            print_network_list(vim.get_org(org_uuid=org_id))
    elif action == 'vdc' or namespace.action == 'vdc':
        if namespace.osm:
            osm_print(vim.get_tenant_list(filter_dict=None))
        else:
            print_vdc_list(vim.get_org(org_uuid=org_id))
    elif action == 'org' or namespace.action == 'org':
        print_org(org_dict=vim.get_org_list())
    else:
        return None
+
+
def print_network_details(network_dict=None):
    """Print a single network's attributes as a one-row table.

    Args:
        network_dict: dict of network attributes; the first value is expected
                      to be the row sequence — TODO confirm against callers.
    """
    try:
        network_table = PrettyTable(network_dict.keys())
        entry = list(network_dict.values())
        network_table.add_row(entry[0])
        print(network_table)
    except KeyError as err:
        # py3 fix: exceptions have no .message; also 'logger.logger.debug' was a typo
        logger.error("wrong key {}".format(err))
        logger.debug(traceback.format_exc())
+
+
def osm_print(generic_dict=None):
    """Print a list of OSM-format dictionaries, one table per element."""
    try:
        for element in generic_dict:
            table = PrettyTable(element.keys())
            entry = list(element.values())
            table.add_row(entry[0])
            # bug fix: print() was outside the loop, so only the last element
            # was shown (and an empty input raised NameError on 'table')
            print(table)
    except KeyError as err:
        # py3 fix: exceptions have no .message; also 'logger.logger.debug' was a typo
        logger.error("wrong key {}".format(err))
        logger.debug(traceback.format_exc())
+
+
def view_actions(vim=None, action=None, namespace=None):
    """Dispatch 'view' sub-commands (org / vapp / network)."""
    org_id = None
    orgs = vim.get_org_list()
    for org in orgs:
        if orgs[org] == namespace.vcdorg:
            org_id = org
            break  # bug fix: break was unconditional, so only the first org was ever checked
    else:
        # for/else: loop finished without a break -> no org matched namespace.vcdorg
        print(" Invalid organization.")
        return

    myorg = vim.get_org(org_uuid=org_id)

    # view org
    if action == 'org' or namespace.action == 'org':
        org_id = None
        orgs = vim.get_org_list()
        if namespace.uuid:
            if namespace.org_name in orgs:
                org_id = namespace.org_name
        else:
            # we need to find the UUID based on the name provided
            for org in orgs:
                if orgs[org] == namespace.org_name:
                    org_id = org
                    break

        logger.debug("Requesting view for orgs {}".format(org_id))
        print_org_details(vim.get_org(org_uuid=org_id))

    # view vapp action
    if action == 'vapp' or namespace.action == 'vapp':
        # NOTE(review): the guard requires namespace.uuid, so the inner
        # "if not namespace.uuid" name-lookup branch is unreachable — confirm intent
        if namespace.vapp_name is not None and namespace.uuid:
            logger.debug("Requesting vapp {} for vdc {}".format(namespace.vapp_name, namespace.vcdvdc))
            vapp_dict = {}
            vapp_uuid = namespace.vapp_name
            # if request based on just name we need get UUID
            if not namespace.uuid:
                vapp_uuid = vim.get_vappid(vdc=namespace.vcdvdc, vapp_name=namespace.vapp_name)
                if vapp_uuid is None:
                    print("Can't find vapp by given name {}".format(namespace.vapp_name))
                    return

            print(" namespace {}".format(namespace))
            if vapp_dict is not None and namespace.osm:
                vm_info_dict = vim.get_vminstance(vim_vm_uuid=vapp_uuid)
                print(vm_info_dict)
            if vapp_dict is not None and not namespace.osm:
                vapp_dict = vim.get_vapp(vdc_name=namespace.vcdvdc, vapp_name=vapp_uuid, isuuid=True)
                print_vapp(vapp_dict=vapp_dict)

    # view network
    if action == 'network' or namespace.action == 'network':
        logger.debug("Requesting view for network {}".format(namespace.network_name))
        network_uuid = namespace.network_name
        # if request name based we need find UUID
        # TODO optimize it or move to external function
        if not namespace.uuid:
            if 'networks' not in myorg:
                print("Network {} is undefined in vcloud director for org {} vdc {}".format(namespace.network_name,
                                                                                            vim.name,
                                                                                            vim.tenant_name))
                return

            my_org_net = myorg['networks']
            for network in my_org_net:
                if my_org_net[network] == namespace.network_name:
                    network_uuid = network
                    break

        print(print_network_details(network_dict=vim.get_vcd_network(network_uuid=network_uuid)))
+
+
def create_actions(vim=None, action=None, namespace=None):
    """Create objects (network / vdc) in vcloud director.

    Args:
        vim - vcloud director vim connector.
        action - action to create (network / vdc etc.).
        namespace - parsed command-line arguments.

    Returns:
        None for an unknown action.
    """
    if action == 'network' or namespace.action == 'network':
        # bug fix: the log message had no placeholder for the .format() argument
        logger.debug("Creating a network {} in vcloud director".format(namespace.network_name))
        network_uuid = vim.create_network(namespace.network_name)
        if network_uuid is not None:
            # bug fix: "Crated" -> "Created" in the user-facing messages
            print("Created new network {} and uuid: {}".format(namespace.network_name, network_uuid))
        else:
            print("Failed create a new network {}".format(namespace.network_name))
    elif action == 'vdc' or namespace.action == 'vdc':
        logger.debug("Creating a new vdc {} in vcloud director.".format(namespace.vdc_name))
        vdc_uuid = vim.create_vdc(namespace.vdc_name)
        if vdc_uuid is not None:
            print("Created new vdc {} and uuid: {}".format(namespace.vdc_name, vdc_uuid))
        else:
            print("Failed create a new vdc {}".format(namespace.vdc_name))
    else:
        return None
+
+
def validate_uuid4(uuid_string):
    """Validate that a string contains a valid UUID.

    Args:
        uuid_string - candidate UUID string.

    Returns:
        True if the string parses as a UUID, False otherwise.
    """
    try:
        # fix: dropped the unused 'val' local; robustness: non-string input
        # (e.g. None) raised TypeError/AttributeError instead of returning False
        uuid.UUID(uuid_string, version=4)
    except (ValueError, AttributeError, TypeError):
        return False
    return True
+
+
def upload_image(vim=None, image_file=None):
    """Upload an image to vcloud director.

    Args:
        vim - vcloud director vim connector.
        image_file - path to the image (OVF) file.

    Returns:
        True if the image uploaded correctly, False otherwise.
    """
    try:
        catalog_uuid = vim.get_image_id_from_path(path=image_file, progress=True)
        if catalog_uuid is not None and validate_uuid4(catalog_uuid):
            print("Image uploaded and uuid {}".format(catalog_uuid))
            return True
    except vimconn.vimconnException as upload_exception:
        print("Failed uploaded {} image".format(image_file))
        # py3 fix: exceptions have no .message attribute; print the instance
        print("Error Reason: {}".format(upload_exception))
    return False
+
+
def boot_image(vim=None, image_name=None, vm_name=None):
    """Boot an image that resides in vcloud director.

    The image can be referenced by UUID or by name.

    Args:
        vim - vim connector.
        image_name - image identified by UUID or text string.
        vm_name - VM name.

    Returns:
        True if the image booted, None if the catalog was not found,
        False on failure.
    """
    try:
        catalogs = vim.vca.get_catalogs()
        # fix: both branches of the uuid/name check were identical — collapsed
        vim_catalog = vim.get_catalogid(catalog_name=image_name, catalogs=catalogs)
        if vim_catalog is None:
            return None

        print(" Booting {} image id {} ".format(vm_name, vim_catalog))
        vm_uuid, _ = vim.new_vminstance(name=vm_name, image_id=vim_catalog)
        if vm_uuid is not None and validate_uuid4(vm_uuid):
            print("Image booted and vm uuid {}".format(vm_uuid))
            # NOTE(review): relies on the module-level 'namespace' set in
            # __main__ for the vdc name — confirm before reusing as a library
            vapp_dict = vim.get_vapp(vdc_name=namespace.vcdvdc, vapp_name=vm_uuid, isuuid=True)
            if vapp_dict is not None:
                print_vapp(vapp_dict=vapp_dict)
        return True
    except vimconn.vimconnNotFoundException as not_found:
        print("Failed boot {} image".format(image_name))
        # py3 fix: exceptions have no .message attribute
        print(str(not_found))
        return False
    except vimconn.vimconnException as vimconn_error:
        print("Failed boot {} image".format(image_name))
        print(str(vimconn_error))
        return False
    except Exception:
        # fix: was a bare 'except:' that swallowed the cause; the trailing
        # 'return False' was only reachable from this branch
        logger.debug(traceback.format_exc())
        print("Failed boot {} image".format(image_name))
        return False
+
+
def image_action(vim=None, action=None, namespace=None):
    """Dispatch image sub-commands.

    Supported actions:
      - upload: push an image to the catalog
      - boot: instantiate a VM from an image
      (delete is not implemented yet)

    Args:
        vim - vcloud director connector.
        action - string (upload/boot etc).
        namespace - carries the remaining attributes (image name etc).

    Returns:
        Nothing (None for unknown actions).
    """
    if 'upload' in (action, namespace.action):
        upload_image(vim=vim, image_file=namespace.image)
    elif 'boot' in (action, namespace.action):
        boot_image(vim=vim, image_name=namespace.image, vm_name=namespace.vmname)
    else:
        return None
+
+
def vmwarecli(command=None, action=None, namespace=None):
    """Main CLI dispatcher: connect to vcloud director and route the request.

    Missing credentials are prompted for interactively.

    Args:
        command - top-level command (list/view/delete/create/image).
        action - sub-command action.
        namespace - parsed argparse namespace.
    """
    logger.debug("Namespace {}".format(namespace))
    urllib3.disable_warnings()

    vcduser = None
    vcdpasword = None
    vcdhost = None
    vcdorg = None

    # bug fix: the username prompt tested namespace.vcdvdc instead of namespace.vcduser
    if namespace.vcduser is None:
        while True:
            vcduser = input("Enter vcd username: ")
            if vcduser is not None and len(vcduser) > 0:
                break
    else:
        vcduser = namespace.vcduser

    if namespace.vcdpassword is None:
        while True:
            vcdpasword = input("Please enter vcd password: ")
            if vcdpasword is not None and len(vcdpasword) > 0:
                break
    else:
        vcdpasword = namespace.vcdpassword

    if namespace.vcdhost is None:
        while True:
            vcdhost = input("Please enter vcd host name or ip: ")
            if vcdhost is not None and len(vcdhost) > 0:
                break
    else:
        vcdhost = namespace.vcdhost

    if namespace.vcdorg is None:
        while True:
            vcdorg = input("Please enter vcd organization name: ")
            if vcdorg is not None and len(vcdorg) > 0:
                break
    else:
        vcdorg = namespace.vcdorg

    try:
        vim = vimconnector(uuid=None,
                           name=vcdorg,
                           tenant_id=None,
                           tenant_name=namespace.vcdvdc,
                           url=vcdhost,
                           url_admin=vcdhost,
                           user=vcduser,
                           passwd=vcdpasword,
                           log_level="DEBUG",
                           config={'admin_username': namespace.vcdamdin, 'admin_password': namespace.vcdadminpassword})
        vim.vca = vim.connect()

    except vimconn.vimconnConnectionException:
        print("Failed connect to vcloud director. Please check credential and hostname.")
        return

    # list
    if command == 'list' or namespace.command == 'list':
        logger.debug("Client requested list action")
        # route request to list actions
        list_actions(vim=vim, action=action, namespace=namespace)

    # view action
    if command == 'view' or namespace.command == 'view':
        logger.debug("Client requested view action")
        view_actions(vim=vim, action=action, namespace=namespace)

    # delete action
    if command == 'delete' or namespace.command == 'delete':
        logger.debug("Client requested delete action")
        delete_actions(vim=vim, action=action, namespace=namespace)

    # create action
    if command == 'create' or namespace.command == 'create':
        logger.debug("Client requested create action")
        create_actions(vim=vim, action=action, namespace=namespace)

    # image action
    if command == 'image' or namespace.command == 'image':
        # bug fix: the log message wrongly said "create action"
        logger.debug("Client requested image action")
        image_action(vim=vim, action=action, namespace=namespace)
+
+
if __name__ == '__main__':
    # fallback values used when neither environment nor command line supplies one
    defaults = {'vcdvdc': 'default',
                'vcduser': 'admin',
                'vcdpassword': 'admin',
                'vcdhost': 'https://localhost',
                'vcdorg': 'default',
                'debug': 'INFO'}

    parser = argparse.ArgumentParser()
    parser.add_argument('-u', '--vcduser', help='vcloud director username', type=str)
    parser.add_argument('-p', '--vcdpassword', help='vcloud director password', type=str)
    # bug fix: -U/-P help texts both said "password"; -U is the admin username
    parser.add_argument('-U', '--vcdamdin', help='vcloud director admin username', type=str)
    parser.add_argument('-P', '--vcdadminpassword', help='vcloud director admin password', type=str)
    parser.add_argument('-c', '--vcdhost', help='vcloud director host', type=str)
    parser.add_argument('-o', '--vcdorg', help='vcloud director org', type=str)
    parser.add_argument('-v', '--vcdvdc', help='vcloud director vdc', type=str)
    parser.add_argument('-d', '--debug', help='debug level', type=int)

    parser_subparsers = parser.add_subparsers(help='commands', dest='command')
    sub = parser_subparsers.add_parser('list', help='List objects (VMs, vApps, networks)')
    sub_subparsers = sub.add_subparsers(dest='action')

    list_vms = sub_subparsers.add_parser('vms', help='list - all vm deployed in vCloud director')
    list_vapps = sub_subparsers.add_parser('vapps', help='list - all vapps deployed in vCloud director')
    list_network = sub_subparsers.add_parser('networks', help='list - all networks deployed')
    list_network.add_argument('-o', '--osm', default=False, action='store_true', help='provide view in OSM format')

    # list vdc
    list_vdc = sub_subparsers.add_parser('vdc', help='list - list all vdc for organization accessible to you')
    list_vdc.add_argument('-o', '--osm', default=False, action='store_true', help='provide view in OSM format')

    list_org = sub_subparsers.add_parser('org', help='list - list of organizations accessible to you.')

    create_sub = parser_subparsers.add_parser('create')
    create_sub_subparsers = create_sub.add_subparsers(dest='action')
    create_vms = create_sub_subparsers.add_parser('vms')
    create_vapp = create_sub_subparsers.add_parser('vapp')
    create_vapp.add_argument('uuid')

    # add network
    create_network = create_sub_subparsers.add_parser('network')
    create_network.add_argument('network_name', action='store', help='create a network for a vdc')

    # add VDC
    create_vdc = create_sub_subparsers.add_parser('vdc')
    create_vdc.add_argument('vdc_name', action='store', help='create a new VDC for org')

    delete_sub = parser_subparsers.add_parser('delete')
    del_sub_subparsers = delete_sub.add_subparsers(dest='action')
    del_vms = del_sub_subparsers.add_parser('vms')
    del_vapp = del_sub_subparsers.add_parser('vapp')
    del_vapp.add_argument('uuid', help='view vapp based on UUID')

    # delete network
    del_network = del_sub_subparsers.add_parser('network')
    del_network.add_argument('network_name', action='store',
                             help='- delete network for vcloud director by provided name')
    del_network.add_argument('-u', '--uuid', default=False, action='store_true',
                             help='delete network for vcloud director by provided uuid')

    # delete vdc
    del_vdc = del_sub_subparsers.add_parser('vdc')

    view_sub = parser_subparsers.add_parser('view')
    view_sub_subparsers = view_sub.add_subparsers(dest='action')

    view_vms_parser = view_sub_subparsers.add_parser('vms')
    view_vms_parser.add_argument('uuid', default=False, action='store_true',
                                 help='- View VM for specific uuid in vcloud director')
    view_vms_parser.add_argument('name', default=False, action='store_true',
                                 help='- View VM for specific vapp name in vcloud director')

    # view vapp
    view_vapp_parser = view_sub_subparsers.add_parser('vapp')
    view_vapp_parser.add_argument('vapp_name', action='store',
                                  help='- view vapp for specific vapp name in vcloud director')
    view_vapp_parser.add_argument('-u', '--uuid', default=False, action='store_true', help='view vapp based on uuid')
    view_vapp_parser.add_argument('-o', '--osm', default=False, action='store_true',  help='provide view in OSM format')

    # view network
    view_network = view_sub_subparsers.add_parser('network')
    view_network.add_argument('network_name', action='store',
                              help='- view network for specific network name in vcloud director')
    view_network.add_argument('-u', '--uuid', default=False, action='store_true', help='view network based on uuid')

    # view VDC command and actions
    view_vdc = view_sub_subparsers.add_parser('vdc')
    view_vdc.add_argument('vdc_name', action='store',
                          help='- View VDC based and action based on provided vdc uuid')
    view_vdc.add_argument('-u', '--uuid', default=False, action='store_true', help='view vdc based on uuid')

    # view organization command and actions
    view_org = view_sub_subparsers.add_parser('org')
    view_org.add_argument('org_name', action='store',
                          help='- View VDC based and action based on provided vdc uuid')
    view_org.add_argument('-u', '--uuid', default=False, action='store_true', help='view org based on uuid')

    # upload image action
    image_sub = parser_subparsers.add_parser('image')
    image_subparsers = image_sub.add_subparsers(dest='action')
    upload_parser = image_subparsers.add_parser('upload')
    upload_parser.add_argument('image', default=False, action='store', help='- valid path to OVF image ')
    upload_parser.add_argument('catalog', default=False, action='store_true', help='- catalog name')

    # boot vm action
    boot_parser = image_subparsers.add_parser('boot')
    boot_parser.add_argument('image', default=False, action='store', help='- Image name')
    boot_parser.add_argument('vmname', default=False, action='store', help='- VM name')
    boot_parser.add_argument('-u', '--uuid', default=False, action='store_true', help='view org based on uuid')

    namespace = parser.parse_args()
    # put command line args into a mapping, dropping unset (falsy) values
    command_line_args = {k: v for k, v in vars(namespace).items() if v}

    d = defaults.copy()
    d.update(os.environ)
    d.update(command_line_args)

    logger = logging.getLogger('mano.vim.vmware')
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    ch = logging.StreamHandler()
    # bug fix: -d is parsed as int, and str.upper(int) raised TypeError;
    # normalize through str() before using it as a level name
    log_level = str(d['debug']).upper()
    ch.setLevel(log_level)
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    logger.setLevel(getattr(logging, log_level))
    logger.info(
        "Connecting {} username: {} org: {} vdc: {} ".format(d['vcdhost'], d['vcduser'], d['vcdorg'], d['vcdvdc']))

    # bug fix: 'command'/'action' are absent from d when no sub-command was
    # given (falsy values are filtered above), so d['command'] raised KeyError;
    # also fixed the "actio" typo in the log message
    logger.debug("command: \"{}\" action: \"{}\"".format(d.get('command'), d.get('action')))

    # main entry point.
    vmwarecli(namespace=namespace)
diff --git a/RO/osm_ro/wim/__init__.py b/RO/osm_ro/wim/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/RO/osm_ro/wim/actions.py b/RO/osm_ro/wim/actions.py
new file mode 100644 (file)
index 0000000..e199fd0
--- /dev/null
@@ -0,0 +1,420 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+# pylint: disable=E1101,E0203,W0201
+
+"""Common logic for task management"""
+import logging
+from time import time
+
+import yaml
+
+from ..utils import (
+    filter_dict_keys,
+    filter_out_dict_keys,
+    merge_dicts,
+    remove_none_items,
+    truncate
+)
+
# Processing status codes returned by ``Action.processing``
PENDING, REFRESH, IGNORE = range(3)

TIMEOUT = 1 * 60 * 60  # 1 hour -- maximum delay tolerated between attempts
MIN_ATTEMPTS = 10      # minimum number of retries before a task may time out


class Action(object):
    """Create a basic object representing the action record.

    Arguments:
        record (dict): record as returned by the database
        logger: optional logger object to be used (a default one is created
            otherwise)
        **kwargs: extra keyword arguments to overwrite the fields in record
    """

    PROPERTIES = [
        'task_index',          # MD - Index number of the task.
                               #      This together with the instance_action_id
                               #      forms a unique key identifier
        'action',              # MD - CREATE, DELETE, FIND
        'item',                # MD - table name, eg. instance_wim_nets
        'item_id',             # MD - uuid of the referenced entry in the
                               #      previous table
        'instance_action_id',  # MD - reference to a cohesive group of actions
                               #      related to the same instance-scenario
        'wim_account_id',      # MD - reference to the WIM account used
                               #      by the thread/connector
        'wim_internal_id',     # MD - internal ID used by the WIM to refer to
                               #      the item
        'datacenter_vim_id',   # MD - reference to the VIM account used
                               #      by the thread/connector
        'vim_id',              # MD - internal ID used by the VIM to refer to
                               #      the item
        'status',              # MD - SCHEDULED,BUILD,DONE,FAILED,SUPERSEDED
        'extra',               # MD - text with yaml format at database,
        #                             dict at memory with:
        # `- params:     list with the params to be sent to the VIM for CREATE
        #                or FIND. For DELETE the vim_id is taken from other
        #                related tasks
        # `- find:       (only for CREATE tasks) if present it should FIND
        #                before creating and use if existing.
        #                Contains the FIND params
        # `- depends_on: list with the 'task_index'es of tasks that must be
        #                completed before. e.g. a vm creation depends on a net
        #                creation
        # `- sdn_net_id: used for net.
        # `- tries
        # `- created_items:
        #                dictionary with extra elements created that need
        #                to be deleted. e.g. ports,
        # `- volumes,...
        # `- created:    False if the VIM element is not created by
        #                other actions, and it should not be deleted
        # `- wim_status: WIM status of the element. Stored also at database
        #                in the item table
        'params',              # M  - similar to extra[params]
        'depends_on',          # M  - similar to extra[depends_on]
        'depends',             # M  - dict with task_index(from depends_on) to
                               #      task class
        'error_msg',           # MD - descriptive text upon an error
        'created_at',          # MD - task DB creation time
        'modified_at',         # MD - last DB update time
        'process_at',          # M  - unix epoch when to process the task
    ]

    __slots__ = PROPERTIES + [
        'logger',
    ]

    def __init__(self, record, logger=None, **kwargs):
        self.logger = logger or logging.getLogger('openmano.wim.action')
        # Start with every property present (None) so __slots__ attributes
        # are always defined, then overlay the DB record and the overrides
        attrs = merge_dicts(dict.fromkeys(self.PROPERTIES), record, kwargs)
        self.update(_expand_extra(attrs))

    def __repr__(self):
        return super(Action, self).__repr__() + repr(self.as_dict())

    def as_dict(self, *fields):
        """Representation of the object as a dict, optionally restricted
        to the given ``fields`` (which must belong to PROPERTIES).
        """
        attrs = (set(self.PROPERTIES) & set(fields)
                 if fields else self.PROPERTIES)
        return {k: getattr(self, k) for k in attrs}

    def as_record(self):
        """Returns a dict that can be send to the persistence layer.

        Memory-only fields are folded back into ``extra`` (note this
        mutates the in-memory ``extra`` dict as a side effect).
        """
        special = ['params', 'depends_on', 'depends']
        record = self.as_dict()
        record['extra'].update(self.as_dict(*special))
        non_fields = special + ['process_at']

        return remove_none_items(filter_out_dict_keys(record, non_fields))

    def update(self, values=None, **kwargs):
        """Update the in-memory representation of the task (works similarly to
        dict.update). The update is NOT automatically persisted.
        """
        # "white-listed mass assignment"
        updates = merge_dicts(values, kwargs)
        for attr in set(self.PROPERTIES) & set(updates.keys()):
            setattr(self, attr, updates[attr])

    def save(self, persistence, **kwargs):
        """Persist current state of the object to the database.

        Arguments:
            persistence: object encapsulating the database
            **kwargs: extra properties to be updated before saving

        Note:
            If any key word argument is passed, the object itself will be
            changed as an extra side-effect.
        """
        action_id = self.instance_action_id
        index = self.task_index
        if kwargs:
            self.update(kwargs)
        properties = self.as_record()

        return persistence.update_action(action_id, index, properties)

    def fail(self, persistence, reason, status='FAILED'):
        """Mark action as FAILED, updating tables accordingly"""
        persistence.update_instance_action_counters(
            self.instance_action_id,
            failed=1,
            done=(-1 if self.status == 'DONE' else 0))

        self.status = status
        self.error_msg = truncate(reason)
        self.logger.error('%s %s: %s', self.id, status, reason)
        return self.save(persistence)

    def succeed(self, persistence, status='DONE'):
        """Mark action as DONE, updating tables accordingly"""
        persistence.update_instance_action_counters(
            self.instance_action_id, done=1)
        self.status = status
        self.logger.debug('%s %s', self.id, status)
        return self.save(persistence)

    def defer(self, persistence, reason,
              timeout=TIMEOUT, min_attempts=MIN_ATTEMPTS):
        """Postpone the task processing, taking care to not timeout.

        Arguments:
            persistence: object encapsulating the database
            reason (str): explanation for the delay
            timeout (int): maximum delay tolerated since the last attempt.
                Note that this number is a time delta, in seconds
            min_attempts (int): Number of attempts to try before giving up.
        """
        now = time()
        last_attempt = self.extra.get('last_attempted_at') or now
        attempts = self.extra.get('attempts') or 0

        # Fail when the elapsed time exceeds the timeout and enough attempts
        # were made.  (Previously the delta was computed as
        # ``last_attempt - now`` -- always negative, so the timeout never
        # fired -- and ``{:d}`` was applied to a float, which raises
        # ValueError on python3.)
        if now - last_attempt > timeout and attempts > min_attempts:
            return self.fail(
                persistence,
                'Timeout reached. {} attempts in the last {:d} min'
                .format(attempts, int((now - last_attempt) / 60)))

        self.extra['last_attempted_at'] = now
        self.extra['attempts'] = attempts + 1
        self.logger.info('%s DEFERRED: %s', self.id, reason)
        return self.save(persistence)

    @property
    def group_key(self):
        """Key defining the group to which this tasks belongs"""
        return (self.item, self.item_id)

    @property
    def processing(self):
        """Processing status for the task (PENDING, REFRESH, IGNORE)"""
        if self.status == 'SCHEDULED':
            return PENDING

        return IGNORE

    @property
    def id(self):
        """Unique identifier of this particular action"""
        return '{}[{}]'.format(self.instance_action_id, self.task_index)

    @property
    def is_scheduled(self):
        return self.status == 'SCHEDULED'

    @property
    def is_build(self):
        return self.status == 'BUILD'

    @property
    def is_done(self):
        return self.status == 'DONE'

    @property
    def is_failed(self):
        return self.status == 'FAILED'

    @property
    def is_superseded(self):
        return self.status == 'SUPERSEDED'

    def refresh(self, connector, persistence):
        """Use the connector/persistence to refresh the status of the item.

        After the item status is refreshed any change in the task should be
        persisted to the database.

        Arguments:
            connector: object containing the classes to access the WIM or VIM
            persistence: object containing the methods necessary to query the
                database and to persist the updates
        """
        self.logger.debug(
            'Action `%s` has no refresh to be done',
            self.__class__.__name__)

    def expand_dependency_links(self, task_group):
        """Expand task indexes into actual IDs"""
        if not self.depends_on or (
                isinstance(self.depends, dict) and self.depends):
            return

        # Only accept tasks from the same group whose index is consistent
        num_tasks = len(task_group)
        references = {
            "TASK-{}".format(i): task_group[i]
            for i in self.depends_on
            if i < num_tasks and task_group[i].task_index == i and
            task_group[i].instance_action_id == self.instance_action_id
        }
        self.depends = references

    def become_superseded(self, superseding):
        """When another action tries to supersede this one,
        we need to change both of them, so the surviving actions will be
        logic consistent.

        This method should do the required internal changes, and also
        suggest changes for the other, superseding, action.

        Arguments:
            superseding: other task superseding this one

        Returns:
            dict: changes suggested to the action superseding this one.
                  A special key ``superseding_needed`` is used to
                  suggest if the superseding is actually required or not.
                  If not present, ``superseding_needed`` is assumed to
                  be False.
        """
        self.status = 'SUPERSEDED'
        self.logger.debug(
            'Action `%s` was superseded by `%s`',
            self.__class__.__name__, superseding.__class__.__name__)
        return {}

    def supersede(self, others):
        """Supersede other tasks, if necessary

        Arguments:
            others (list): action objects being superseded

        When the task decide to supersede others, this method should call
        ``become_superseded`` on the other actions, collect the suggested
        updates and perform the necessary changes
        """
        # By default actions don't supersede others
        self.logger.debug(
            'Action `%s` does not supersede other actions',
            self.__class__.__name__)

    def process(self, connector, persistence, ovim):
        """Abstract method, that needs to be implemented.
        Process the current task.

        Arguments:
            connector: object with API for accessing the WAN
                Infrastructure Manager system
            persistence: abstraction layer for the database
            ovim: instance of openvim, abstraction layer that enable
                SDN-related operations
        """
        raise NotImplementedError
+
+
class FindAction(Action):
    """Base class for FIND actions (locate an existing item of the
    corresponding type instead of creating it).
    """
    @property
    def processing(self):
        """Items already found only require a status refresh."""
        return (REFRESH if self.status in ('DONE', 'BUILD')
                else super().processing)

    def become_superseded(self, superseding):
        """Hand the identifiers already found over to the superseding task."""
        super().become_superseded(superseding)
        known_ids = {'vim_id': self.vim_id,
                     'wim_internal_id': self.wim_internal_id}
        return remove_none_items(known_ids)
+
+
class CreateAction(Action):
    """Base class for CREATE actions (instantiate an item of the
    corresponding type).
    """
    @property
    def processing(self):
        """Items already created only require a status refresh."""
        if self.status not in ('DONE', 'BUILD'):
            return super().processing
        return REFRESH

    def become_superseded(self, superseding):
        """Suggest to the superseding (DELETE) task everything it needs to
        clean up whatever this task already created.
        """
        super().become_superseded(superseding)

        was_created = self.extra.get('created', True)
        sdn_net_id = self.extra.get('sdn_net_id')
        known_reference = self.wim_internal_id or self.vim_id or sdn_net_id
        # Nothing to clean up: either the element is externally managed or
        # no identifier was ever recorded
        if not (was_created and known_reference):
            return {}

        cleanup_fields = ('sdn_net_id', 'interfaces', 'created_items')
        cleanup_info = filter_dict_keys(self.extra or {}, cleanup_fields)

        return {'superseding_needed': True,
                'wim_internal_id': self.wim_internal_id,
                'vim_id': self.vim_id,
                'extra': remove_none_items(cleanup_info)}
+
+
class DeleteAction(Action):
    """Base class for DELETE actions (remove an item of the corresponding
    type).
    """
    def supersede(self, others):
        """Absorb the state of the superseded actions so the clean-up they
        would have required is performed by this task instead.
        """
        self.logger.debug('%s %s %s %s might supersede other actions',
                          self.id, self.action, self.item, self.item_id)
        # Collect the suggestions of every action being superseded
        suggestions = []
        for other in others:
            suggestions.append(other.become_superseded(self))
        needed = False
        for suggestion in suggestions:
            if suggestion.pop('superseding_needed', False):
                needed = True

        # Merge the nested structures ('extra' and its 'created_items') first
        extra_updates = [s.pop('extra', None) or {} for s in suggestions]
        created = [e.pop('created_items', None) or {} for e in extra_updates]
        merged_items = merge_dicts(
            self.extra.get('created_items', {}), *created)
        self.extra = merge_dicts(
            self.extra, {'created_items': merged_items}, *extra_updates)

        # Then adopt every remaining suggested (white-listed) attribute
        combined = merge_dicts(*suggestions)
        for attr, value in combined.items():
            if attr in self.PROPERTIES:
                setattr(self, attr, value)

        # If nothing was actually created, this delete is unnecessary too
        if not needed:
            self.status = 'SUPERSEDED'
+
+
+def _expand_extra(record):
+    extra = record.pop('extra', None) or {}
+    if isinstance(extra, str):
+        extra = yaml.safe_load(extra)
+
+    record['params'] = extra.get('params')
+    record['depends_on'] = extra.get('depends_on', [])
+    record['depends'] = extra.get('depends', None)
+    record['extra'] = extra
+
+    return record
diff --git a/RO/osm_ro/wim/engine.py b/RO/osm_ro/wim/engine.py
new file mode 100644 (file)
index 0000000..cf5b85a
--- /dev/null
@@ -0,0 +1,532 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+
+"""This module contains the domain logic, and the implementation of the
+required steps to perform VNF management and orchestration in a WAN
+environment.
+
+It works as an extension/complement to the main functions contained in the
+``nfvo.py`` file and avoids interacting directly with the database, by relying
+on the `persistence` module.
+
+No http request handling/direct interaction with the database should be present
+in this file.
+"""
+import json
+import logging
+from contextlib import contextmanager
+from itertools import groupby
+from operator import itemgetter
+from sys import exc_info
+from uuid import uuid4
+
+from ..utils import remove_none_items
+from .actions import Action
+from .errors import (
+    DbBaseException,
+    NoWimConnectedToDatacenters,
+    UnexpectedDatabaseError,
+    WimAccountNotActive
+)
+from .wim_thread import WimThread
+
+
+class WimEngine(object):
+    """Logic supporting the establishment of WAN links when NS spans across
+    different datacenters.
+    """
+    def __init__(self, persistence, logger=None, ovim=None):
+        self.persist = persistence
+        self.logger = logger or logging.getLogger('openmano.wim.engine')
+        self.threads = {}
+        self.connectors = {}
+        self.ovim = ovim
+
+    def create_wim(self, properties):
+        """Create a new wim record according to the properties
+
+        Please check the wim schema to have more information about
+        ``properties``.
+
+        The ``config`` property might contain a ``wim_port_mapping`` dict,
+        In this case, the method ``create_wim_port_mappings`` will be
+        automatically invoked.
+
+        Returns:
+            str: uuid of the newly created WIM record
+        """
+        port_mapping = ((properties.get('config', {}) or {})
+                        .pop('wim_port_mapping', {}))
+        uuid = self.persist.create_wim(properties)
+
+        if port_mapping:
+            try:
+                self.create_wim_port_mappings(uuid, port_mapping)
+            except DbBaseException as e:
+                # Rollback
+                self.delete_wim(uuid)
+                ex = UnexpectedDatabaseError('Failed to create port mappings'
+                                             'Rolling back wim creation')
+                self.logger.exception(str(ex))
+                raise ex from e
+
+        return uuid
+
+    def get_wim(self, uuid_or_name, tenant_id=None):
+        """Retrieve existing WIM record by name or id.
+
+        If ``tenant_id`` is specified, the query will be
+        limited to the WIM associated to the given tenant.
+        """
+        # Since it is a pure DB operation, we can delegate it directly
+        return self.persist.get_wim(uuid_or_name, tenant_id)
+
+    def update_wim(self, uuid_or_name, properties):
+        """Edit an existing WIM record.
+
+        ``properties`` is a dictionary with the properties being changed,
+        if a property is not present, the old value will be preserved
+
+        Similarly to create_wim, the ``config`` property might contain a
+        ``wim_port_mapping`` dict, In this case, port mappings will be
+        automatically updated.
+        """
+        port_mapping = ((properties.get('config', {}) or {})
+                        .pop('wim_port_mapping', {}))
+        orig_props = self.persist.get_by_name_or_uuid('wims', uuid_or_name)
+        uuid = orig_props['uuid']
+
+        response = self.persist.update_wim(uuid, properties)
+
+        if port_mapping:
+            try:
+                # It is very complex to diff and update individually all the
+                # port mappings. Therefore a practical approach is just delete
+                # and create it again.
+                self.persist.delete_wim_port_mappings(uuid)
+                # ^  Calling from persistence avoid reloading twice the thread
+                self.create_wim_port_mappings(uuid, port_mapping)
+            except DbBaseException as e:
+                # Rollback
+                self.update_wim(uuid_or_name, orig_props)
+                ex = UnexpectedDatabaseError('Failed to update port mappings'
+                                             'Rolling back wim updates\n')
+                self.logger.exception(str(ex))
+                raise ex from e
+
+        return response
+
+    def delete_wim(self, uuid_or_name):
+        """Kill the corresponding wim threads and erase the WIM record"""
+        # Theoretically, we can rely on the database to drop the wim_accounts
+        # automatically, since we have configures 'ON CASCADE DELETE'.
+        # However, use use `delete_wim_accounts` to kill all the running
+        # threads.
+        self.delete_wim_accounts(uuid_or_name)
+        return self.persist.delete_wim(uuid_or_name)
+
+    def create_wim_account(self, wim, tenant, properties):
+        """Create an account that associates a tenant to a WIM.
+
+        As a side effect this function will spawn a new thread
+
+        Arguments:
+            wim (str): name or uuid of the WIM related to the account being
+                created
+            tenant (str): name or uuid of the nfvo tenant to which the account
+                will be created
+            properties (dict): properties of the account
+                (eg. username, password, ...)
+
+        Returns:
+            dict: Created record
+        """
+        uuid = self.persist.create_wim_account(wim, tenant, properties)
+        account = self.persist.get_wim_account_by(uuid=uuid)
+        # ^  We need to use get_wim_account_by here, since this methods returns
+        #    all the associations, and we need the wim to create the thread
+        self._spawn_thread(account)
+        return account
+
+    def _update_single_wim_account(self, account, properties):
+        """Update WIM Account, taking care to reload the corresponding thread
+
+        Arguments:
+            account (dict): Current account record
+            properties (dict): Properties to be updated
+
+        Returns:
+            dict: updated record
+        """
+        account = self.persist.update_wim_account(account['uuid'], properties)
+        self.threads[account['uuid']].reload()
+        return account
+
+    def update_wim_accounts(self, wim, tenant, properties):
+        """Update all the accounts related to a WIM and a tenant,
+        thanking care of reloading threads.
+
+        Arguments:
+            wim (str): uuid or name of a WIM record
+            tenant (str): uuid or name of a NFVO tenant record
+            properties (dict): attributes with values to be updated
+
+        Returns
+            list: Records that were updated
+        """
+        accounts = self.persist.get_wim_accounts_by(wim, tenant)
+        return [self._update_single_wim_account(account, properties)
+                for account in accounts]
+
+    def _delete_single_wim_account(self, account):
+        """Delete WIM Account, taking care to remove the corresponding thread
+        and delete the internal WIM account, if it was automatically generated.
+
+        Arguments:
+            account (dict): Current account record
+            properties (dict): Properties to be updated
+
+        Returns:
+            dict: current record (same as input)
+        """
+        self.persist.delete_wim_account(account['uuid'])
+
+        if account['uuid'] not in self.threads:
+            raise WimAccountNotActive(
+                'Requests send to the WIM Account %s are not currently '
+                'being processed.', account['uuid'])
+        else:
+            self.threads[account['uuid']].exit()
+            del self.threads[account['uuid']]
+
+        return account
+
+    def delete_wim_accounts(self, wim, tenant=None, **kwargs):
+        """Delete all the accounts related to a WIM (and a tenant),
+        thanking care of threads and internal WIM accounts.
+
+        Arguments:
+            wim (str): uuid or name of a WIM record
+            tenant (str): uuid or name of a NFVO tenant record
+
+        Returns
+            list: Records that were deleted
+        """
+        kwargs.setdefault('error_if_none', False)
+        accounts = self.persist.get_wim_accounts_by(wim, tenant, **kwargs)
+        return [self._delete_single_wim_account(a) for a in accounts]
+
+    def _reload_wim_threads(self, wim_id):
+        for thread in self.threads.values():
+            if thread.wim_account['wim_id'] == wim_id:
+                thread.reload()
+
+    def create_wim_port_mappings(self, wim, properties, tenant=None):
+        """Store information about port mappings from Database"""
+        # TODO: Review tenants... WIMs can exist across different tenants,
+        #       and the port_mappings are a WIM property, not a wim_account
+        #       property, so the concepts are not related
+        wim = self.persist.get_by_name_or_uuid('wims', wim)
+        result = self.persist.create_wim_port_mappings(wim, properties, tenant)
+        self._reload_wim_threads(wim['uuid'])
+        return result
+
+    def get_wim_port_mappings(self, wim):
+        """Retrive information about port mappings from Database"""
+        return self.persist.get_wim_port_mappings(wim)
+
+    def delete_wim_port_mappings(self, wim):
+        """Erase the port mapping records associated with the WIM"""
+        wim = self.persist.get_by_name_or_uuid('wims', wim)
+        message = self.persist.delete_wim_port_mappings(wim['uuid'])
+        self._reload_wim_threads(wim['uuid'])
+        return message
+
+    def find_common_wims(self, datacenter_ids, tenant):
+        """Find WIMs that are common to all datacenters listed"""
+        mappings = self.persist.get_wim_port_mappings(
+            datacenter=datacenter_ids, tenant=tenant, error_if_none=False)
+
+        wim_id_of = itemgetter('wim_id')
+        sorted_mappings = sorted(mappings, key=wim_id_of)  # needed by groupby
+        grouped_mappings = groupby(sorted_mappings, key=wim_id_of)
+        mapped_datacenters = {
+            wim_id: [m['datacenter_id'] for m in mappings]
+            for wim_id, mappings in grouped_mappings
+        }
+
+        return [
+            wim_id
+            for wim_id, connected_datacenters in mapped_datacenters.items()
+            if set(connected_datacenters) >= set(datacenter_ids)
+        ]
+
+    def find_common_wim(self, datacenter_ids, tenant):
+        """Find a single WIM that is able to connect all the datacenters
+        listed
+
+        Raises:
+            NoWimConnectedToDatacenters: if no WIM connected to all datacenters
+                at once is found
+        """
+        suitable_wim_ids = self.find_common_wims(datacenter_ids, tenant)
+
+        if not suitable_wim_ids:
+            raise NoWimConnectedToDatacenters(datacenter_ids)
+
+        # TODO: use a criteria to determine which WIM is going to be used,
+        #       instead of always using the first one (strategy pattern can be
+        #       used here)
+        return suitable_wim_ids[0]
+
+    def find_suitable_wim_account(self, datacenter_ids, tenant):
+        """Find a WIM account that is able to connect all the datacenters
+        listed
+
+        Arguments:
+            datacenter_ids (list): List of UUIDs of all the datacenters (vims),
+                that need to be connected.
+            tenant (str): UUID of the OSM tenant
+
+        Returns:
+            object with the WIM account that is able to connect all the
+                 datacenters.
+        """
+        wim_id = self.find_common_wim(datacenter_ids, tenant)
+        return self.persist.get_wim_account_by(wim_id, tenant)
+
+    def derive_wan_link(self,
+                        wim_usage,
+                        instance_scenario_id, sce_net_id,
+                        networks, tenant, related=None):
+        """Create a instance_wim_nets record for the given information"""
+        if sce_net_id in wim_usage:
+            account_id = wim_usage[sce_net_id]
+            account = self.persist.get_wim_account_by(uuid=account_id)
+            wim_id = account['wim_id']
+        else:
+            datacenters = [n['datacenter_id'] for n in networks]
+            wim_id = self.find_common_wim(datacenters, tenant)
+            account = self.persist.get_wim_account_by(wim_id, tenant)
+
+        return {
+            'uuid': str(uuid4()),
+            'instance_scenario_id': instance_scenario_id,
+            'sce_net_id': sce_net_id,
+            'wim_id': wim_id,
+            'wim_account_id': account['uuid'],
+            'related': related
+        }
+
+    def derive_wan_links(self, wim_usage, networks, tenant=None):
+        """Discover and return what are the wan_links that have to be created
+        considering a set of networks (VLDs) required for a scenario instance
+        (NSR).
+
+        Arguments:
+            wim_usage(dict): Mapping between sce_net_id and wim_id. If wim_id is False, means not create wam_links
+            networks(list): Dicts containing the information about the networks
+                that will be instantiated to materialize a Network Service
+                (scenario) instance.
+                Corresponding to the ``instance_net`` record.
+
+        Returns:
+            list: list of WAN links to be written to the database
+        """
+        # Group networks by key=(instance_scenario_id, sce_net_id)
+        related = None
+        if networks:
+            related = networks[0].get("related")
+        filtered = _filter_multi_vim(networks)
+        grouped_networks = _group_networks(filtered)
+        datacenters_per_group = _count_datacenters(grouped_networks)
+        # For each group count the number of networks. If greater then 1,
+        # we have to create a wan link connecting them.
+        wan_groups = [key
+                      for key, counter in datacenters_per_group
+                      if counter > 1]
+        # Keys are tuples(instance_scenario_id, sce_net_id)
+        return [
+            self.derive_wan_link(wim_usage,
+                                 key[0], key[1], grouped_networks[key], tenant, related)
+            for key in wan_groups if wim_usage.get(key[1]) is not False
+        ]
+
+    def create_action(self, wan_link):
+        """For a single wan_link create the corresponding create action"""
+        return {
+            'action': 'CREATE',
+            'status': 'SCHEDULED',
+            'item': 'instance_wim_nets',
+            'item_id': wan_link['uuid'],
+            'wim_account_id': wan_link['wim_account_id']
+        }
+
+    def create_actions(self, wan_links):
+        """For an array of wan_links, create all the corresponding actions"""
+        return [self.create_action(l) for l in wan_links]
+
+    def delete_action(self, wan_link):
+        """For a single wan_link create the corresponding create action"""
+        return {
+            'action': 'DELETE',
+            'status': 'SCHEDULED',
+            'item': 'instance_wim_nets',
+            'item_id': wan_link['uuid'],
+            'wim_account_id': wan_link['wim_account_id'],
+            'extra': json.dumps({'wan_link': wan_link})
+            # We serialize and cache the wan_link here, because it can be
+            # deleted during the delete process
+        }
+
+    def delete_actions(self, wan_links=(), instance_scenario_id=None):
+        """Given a Instance Scenario, remove all the WAN Links created in the
+        past"""
+        if instance_scenario_id:
+            wan_links = self.persist.get_wan_links(
+                instance_scenario_id=instance_scenario_id)
+        return [self.delete_action(l) for l in wan_links]
+
+    def incorporate_actions(self, wim_actions, instance_action):
+        """Make the instance action consider new WIM actions and make the WIM
+        actions aware of the instance action
+        """
+        current = instance_action.setdefault('number_tasks', 0)
+        for i, action in enumerate(wim_actions):
+            action['task_index'] = current + i
+            action['instance_action_id'] = instance_action['uuid']
+        instance_action['number_tasks'] += len(wim_actions)
+
+        return wim_actions, instance_action
+
+    def dispatch(self, tasks):
+        """Enqueue a list of tasks for further processing.
+
+        This function is supposed to be called outside from the WIM Thread.
+        """
+        for task in tasks:
+            if task['wim_account_id'] not in self.threads:
+                error_msg = str(WimAccountNotActive(
+                    'Requests send to the WIM Account %s are not currently '
+                    'being processed.', task['wim_account_id']))
+                Action(task, self.logger).fail(self.persist, error_msg)
+                self.persist.update_wan_link(task['item_id'],
+                                             {'status': 'ERROR',
+                                              'error_msg': error_msg})
+                self.logger.error('Task %s %s %s not dispatched.\n%s',
+                                  task['action'], task['item'],
+                                  task['instance_account_id'], error_msg)
+            else:
+                self.threads[task['wim_account_id']].insert_task(task)
+                self.logger.debug('Task %s %s %s dispatched',
+                                  task['action'], task['item'],
+                                  task['instance_action_id'])
+
+    def _spawn_thread(self, wim_account):
+        """Spawn a WIM thread
+
+        Arguments:
+            wim_account (dict): WIM information (usually persisted)
+                The `wim` field is required to be set with a valid WIM record
+                inside the `wim_account` dict
+
+        Return:
+            threading.Thread: Thread object
+        """
+        thread = None
+        try:
+            thread = WimThread(self.persist, wim_account, ovim=self.ovim)
+            self.threads[wim_account['uuid']] = thread
+            thread.start()
+        except:  # noqa
+            self.logger.error('Error when spawning WIM thread for %s',
+                              wim_account['uuid'], exc_info=True)
+
+        return thread
+
+    def start_threads(self):
+        """Start the threads responsible for processing WIM Actions"""
+        accounts = self.persist.get_wim_accounts(error_if_none=False)
+        self.threads = remove_none_items(
+            {a['uuid']: self._spawn_thread(a) for a in accounts})
+
+    def stop_threads(self):
+        """Stop the threads responsible for processing WIM Actions"""
+        for uuid, thread in self.threads.items():
+            thread.exit()
+        self.threads.clear()
+
    @contextmanager
    def threads_running(self):
        """Context manager that ensures no thread will be left running:
        threads are started on entry and always stopped on exit, even when
        the managed block raises.
        """
        # This method is particularly important for testing :)
        try:
            self.start_threads()
            yield
        finally:
            self.stop_threads()
+
+
+def _filter_multi_vim(networks):
+    """Ignore networks without sce_net_id (all VNFs go to the same VIM)"""
+    return [n for n in networks if 'sce_net_id' in n and n['sce_net_id']]
+
+
+def _group_networks(networks):
+    """Group networks that correspond to the same instance_scenario_id and
+    sce_net_id (NSR and VLD).
+
+    Arguments:
+        networks(list): Dicts containing the information about the networks
+            that will be instantiated to materialize a Network Service
+            (scenario) instance.
+    Returns:
+        dict: Keys are tuples (instance_scenario_id, sce_net_id) and values
+            are list of networks.
+    """
+    criteria = itemgetter('instance_scenario_id', 'sce_net_id')
+
+    networks = sorted(networks, key=criteria)
+    return {k: list(v) for k, v in groupby(networks, key=criteria)}
+
+
+def _count_datacenters(grouped_networks):
+    """Count the number of datacenters in each group of networks
+
+    Returns:
+        list of tuples: the first element is the group key, while the second
+            element is the number of datacenters in each group.
+    """
+    return ((key, len(set(n['datacenter_id'] for n in group)))
+            for key, group in grouped_networks.items())
diff --git a/RO/osm_ro/wim/errors.py b/RO/osm_ro/wim/errors.py
new file mode 100644 (file)
index 0000000..e8d4b63
--- /dev/null
@@ -0,0 +1,189 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+import queue
+
+from ..db_base import db_base_Exception as DbBaseException
+from ..http_tools.errors import (
+    Bad_Request,
+    Conflict,
+    HttpMappedError,
+    Internal_Server_Error,
+    Not_Found
+)
+
+
class NoRecordFound(DbBaseException):
    """No record was found in the database"""
    # NOTE: the class docstring above is embedded in the runtime error
    # message (via ``self.__class__.__doc__``) -- editing it changes
    # user-visible errors

    def __init__(self, criteria, table=None):
        # criteria: the query criteria (uuid/name/filter) that matched nothing
        # table: optional table name, prefixed to the message when given
        table_info = '{} - '.format(table) if table else ''
        super(NoRecordFound, self).__init__(
            '{}: {}`{}`'.format(self.__class__.__doc__, table_info, criteria),
            http_code=Not_Found)
+
+
class MultipleRecordsFound(DbBaseException):
    """More than one record was found in the database"""
    # Raised when a query expected to match a single row matched several.
    # The docstring above is embedded in the runtime error message.

    def __init__(self, criteria, table=None):
        # table is optional; when given it prefixes the message
        table_info = '{} - '.format(table) if table else ''
        super(MultipleRecordsFound, self).__init__(
            '{}: {}`{}`'.format(self.__class__.__doc__, table_info, criteria),
            http_code=Conflict)
+
+
class WimAndTenantNotAttached(DbBaseException):
    """Wim and Tenant are not attached"""
    # Maps to HTTP 409 Conflict; the docstring is part of the message

    def __init__(self, wim, tenant):
        super(WimAndTenantNotAttached, self).__init__(
            '{}: `{}` <> `{}`'.format(self.__class__.__doc__, wim, tenant),
            http_code=Conflict)
+
+
class WimAndTenantAlreadyAttached(DbBaseException):
    """There is already a wim account attaching the given wim and tenant"""
    # Maps to HTTP 409 Conflict; the docstring is part of the message

    def __init__(self, wim, tenant):
        super(WimAndTenantAlreadyAttached, self).__init__(
            '{}: `{}` <> `{}`'.format(self.__class__.__doc__, wim, tenant),
            http_code=Conflict)
+
+
class NoWimConnectedToDatacenters(NoRecordFound):
    """No WIM that is able to connect the given datacenters was found"""
    # Inherits message building (and HTTP 404) from NoRecordFound; the
    # docstring above is what actually appears in the error message
+
+
class InvalidParameters(DbBaseException):
    """The given parameters are invalid"""
    # Generic HTTP 400 error; subclasses pass a customized message

    def __init__(self, message, http_code=Bad_Request):
        super(InvalidParameters, self).__init__(message, http_code)
+
+
class UndefinedAction(HttpMappedError):
    """No action found"""
    # Raised when an action (e.g. CREATE/DELETE) has no implementation
    # registered for the given item type

    def __init__(self, item_type, action, http_code=Internal_Server_Error):
        message = ('The action {} {} is not defined'.format(action, item_type))
        super(UndefinedAction, self).__init__(message, http_code)
+
+
class UndefinedWimConnector(DbBaseException):
    """The connector class for the specified wim type is not implemented"""

    def __init__(self, wim_type, module_name, location_reference):
        # NOTE(review): ``location_reference`` is accepted but never used in
        # the message -- kept as-is for interface compatibility with callers
        super(UndefinedWimConnector, self).__init__(
            ('{}: `{}`. Could not find module `{}` '
             '(check if it is necessary to install a plugin)'
             .format(self.__class__.__doc__, wim_type, module_name)),
            http_code=Bad_Request)
+
+
class WimAccountOverwrite(DbBaseException):
    """An attempt to overwrite an existing WIM account was identified"""

    def __init__(self, wim_account, diff=None, tip=None):
        # wim_account: existing record (dict with at least name/uuid)
        # diff: optional dict of the fields whose values differ
        # tip: optional extra hint appended to the message
        message = self.__class__.__doc__
        account_info = (
            'Account -- name: {name}, uuid: {uuid}'.format(**wim_account)
            if wim_account else '')
        diff_info = (
            'Differing fields: ' + ', '.join(diff.keys()) if diff else '')

        # Only the non-empty parts make it into the final multi-line message
        super(WimAccountOverwrite, self).__init__(
            '\n'.join(m for m in (message, account_info, diff_info, tip) if m),
            http_code=Conflict)
+
+
class UnexpectedDatabaseError(DbBaseException):
    """The database didn't raised an exception but also the query was not
    executed (maybe the connection had some problems?)
    """
    # NOTE(review): docstring grammar left untouched on purpose -- sibling
    # classes embed ``__doc__`` in runtime messages, so editing it could
    # change user-visible errors
+
+
class UndefinedUuidOrName(DbBaseException):
    """Trying to query for a record using an empty uuid or name"""

    def __init__(self, table=None):
        table_info = '{} - '.format(table.split()[0]) if table else ''
        # Bugfix: the keyword was previously ``http_status``, which is not
        # the keyword accepted by DbBaseException (every sibling class in
        # this module passes ``http_code``) and raised a TypeError the
        # moment this exception was instantiated
        super(UndefinedUuidOrName, self).__init__(
            table_info + self.__class__.__doc__, http_code=Bad_Request)
+
+
class UndefinedWanMappingType(InvalidParameters):
    """The dict wan_service_mapping_info MUST contain a `type` field"""

    def __init__(self, given):
        # given: the offending wan_service_mapping_info value, echoed back
        super(UndefinedWanMappingType, self).__init__(
            '{}. Given: `{}`'.format(self.__class__.__doc__, given))
+
+
class QueueFull(HttpMappedError, queue.Full):
    """Thread queue is full"""
    # Also subclasses queue.Full so callers handling the standard queue
    # exception will catch this one too

    def __init__(self, thread_name, http_code=Internal_Server_Error):
        message = ('Thread {} queue is full'.format(thread_name))
        super(QueueFull, self).__init__(message, http_code)
+
+
class InconsistentState(HttpMappedError):
    """An unexpected inconsistency was found in the state of the program"""

    def __init__(self, arg, http_code=Internal_Server_Error):
        # Accepts either a plain message string or another HttpMappedError
        # to wrap, in which case its message and http_code are reused
        if isinstance(arg, HttpMappedError):
            http_code = arg.http_code
            message = str(arg)
        else:
            message = arg

        super(InconsistentState, self).__init__(message, http_code)
+
+
class WimAccountNotActive(HttpMappedError, KeyError):
    """WIM Account is not active yet (no thread is running)"""
    # Also subclasses KeyError -- presumably so it can surface from
    # thread-registry lookups; TODO confirm against callers

    def __init__(self, message, http_code=Internal_Server_Error):
        # A fixed explanation is always appended to the caller's message
        message += ('\nThe thread responsible for processing the actions have '
                    'suddenly stopped, or have never being spawned')
        super(WimAccountNotActive, self).__init__(message, http_code)
+
+
class NoExternalPortFound(HttpMappedError):
    """No external port associated to the instance_net"""
    # Maps to HTTP 404; the docstring above is embedded in the message

    def __init__(self, instance_net):
        # instance_net: record dict, only its ``uuid`` is reported
        super(NoExternalPortFound, self).__init__(
            '{} uuid({})'.format(self.__class__.__doc__, instance_net['uuid']),
            http_code=Not_Found)
diff --git a/RO/osm_ro/wim/failing_connector.py b/RO/osm_ro/wim/failing_connector.py
new file mode 100644 (file)
index 0000000..b66551c
--- /dev/null
@@ -0,0 +1,82 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+
+"""In the case any error happens when trying to initiate the WIM Connector,
+we need a replacement for it, that will throw an error every time we try to
+execute any action
+"""
+import json
+from .wimconn import WimConnectorError
+
+
class FailingConnector(object):
    """Placeholder for a connector whose instantiation failed.

    This placeholder will just raise an error every time an action is needed
    from the connector.

    This way we can make sure that all the other parts of the program will
    work but the user will have all the information available to fix the
    problem.
    """
    def __init__(self, error_msg):
        # error_msg: description of the original failure, replayed inside
        # every error raised by this object
        self.error_msg = error_msg

    def check_credentials(self):
        raise WimConnectorError('Impossible to use WIM:\n' + self.error_msg)

    def get_connectivity_service_status(self, service_uuid, _conn_info=None):
        raise WimConnectorError('Impossible to retrieve status for {}\n\n{}'
                                .format(service_uuid, self.error_msg))

    def create_connectivity_service(self, service_uuid, *args, **kwargs):
        # The call arguments are echoed back JSON-serialized for debugging
        raise WimConnectorError('Impossible to connect {}.\n{}\n{}\n{}'
                                .format(service_uuid, self.error_msg,
                                        json.dumps(args, indent=4),
                                        json.dumps(kwargs, indent=4)))

    def delete_connectivity_service(self, service_uuid, _conn_info=None):
        raise WimConnectorError('Impossible to disconnect {}\n\n{}'
                                .format(service_uuid, self.error_msg))

    def edit_connectivity_service(self, service_uuid, *args, **kwargs):
        raise WimConnectorError('Impossible to change connection {}.\n{}\n'
                                '{}\n{}'
                                .format(service_uuid, self.error_msg,
                                        json.dumps(args, indent=4),
                                        json.dumps(kwargs, indent=4)))

    def clear_all_connectivity_services(self):
        raise WimConnectorError('Impossible to use WIM:\n' + self.error_msg)

    def get_all_active_connectivity_services(self):
        raise WimConnectorError('Impossible to use WIM:\n' + self.error_msg)
diff --git a/RO/osm_ro/wim/http_handler.py b/RO/osm_ro/wim/http_handler.py
new file mode 100644 (file)
index 0000000..b88dab3
--- /dev/null
@@ -0,0 +1,226 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+
+"""This module works as an extension to the toplevel ``httpserver`` module,
+implementing callbacks for the HTTP routes related to the WIM features of OSM.
+
+Acting as a front-end, it is responsible for converting the HTTP request
+payload into native python objects, calling the correct engine methods
+and converting back the response objects into strings to be send in the HTTP
+response payload.
+
+Direct domain/persistence logic should be avoided in this file, instead
+calls to other layers should be done.
+"""
+import logging
+
+from bottle import request
+
+from .. import utils
+from ..http_tools.errors import ErrorHandler
+from ..http_tools.handler import BaseHandler, route
+from ..http_tools.request_processing import (
+    filter_query_string,
+    format_in,
+    format_out
+)
+from .engine import WimEngine
+from .persistence import WimPersistence
+from .schemas import (
+    wim_account_schema,
+    wim_edit_schema,
+    wim_port_mapping_schema,
+    wim_schema
+)
+
+
class WimHandler(BaseHandler):
    """HTTP route implementations for WIM related URLs

    Arguments:
        db: instance of mydb [optional]. This argument must be provided
            if not ``persistence`` is passed
        persistence (WimPersistence): High-level data storage abstraction
            [optional]. If this argument is not present, ``db`` must be.
        engine (WimEngine): Implementation of the business logic
            for the engine of WAN networks
        logger (logging.Logger): logger object [optional]
        url_base(str): Path fragment to be prepended to the routes [optional]
        plugins(list): List of bottle plugins to be applied to routes
            [optional]
    """
    def __init__(self, db=None, persistence=None, engine=None,
                 url_base='', logger=None, plugins=()):
        self.persist = persistence or WimPersistence(db)
        self.engine = engine or WimEngine(self.persist)
        self.url_base = url_base
        self.logger = logger or logging.getLogger('openmano.wim.http')
        # The error handler plugin translates exceptions into HTTP responses
        error_handler = ErrorHandler(self.logger)
        self.plugins = [error_handler] + list(plugins)

    @route('GET', '/<tenant_id>/wims')
    def http_list_wims(self, tenant_id):
        """List the WIMs visible to a tenant (or all, if tenant is 'any')"""
        allowed_fields = ('uuid', 'name', 'wim_url', 'type', 'created_at')
        select_, where_, limit_ = filter_query_string(
            request.query, None, allowed_fields)
        # ^  Since we allow the user to customize the db query using the HTTP
        #    query and it is quite difficult to re-use this query, let's just
        #    do a ad-hoc call to the db

        from_ = 'wims'
        if tenant_id != 'any':
            # Restrict results to WIMs attached to this tenant; the join
            # introduces ambiguity for `created_at`, hence the aliasing below
            where_['nfvo_tenant_id'] = tenant_id
            if 'created_at' in select_:
                select_[select_.index('created_at')] = (
                    'w.created_at as created_at')
            if 'created_at' in where_:
                where_['w.created_at'] = where_.pop('created_at')
            from_ = ('wims as w join wim_nfvo_tenants as wt '
                     'on w.uuid=wt.wim_id')

        wims = self.persist.query(
            FROM=from_, SELECT=select_, WHERE=where_, LIMIT=limit_,
            error_if_none=False)

        utils.convert_float_timestamp2str(wims)
        return format_out({'wims': wims})

    @route('GET', '/<tenant_id>/wims/<wim_id>')
    def http_get_wim(self, tenant_id, wim_id):
        """Retrieve one WIM together with its port mappings"""
        tenant_id = None if tenant_id == 'any' else tenant_id
        wim = self.engine.get_wim(wim_id, tenant_id)
        mappings = self.engine.get_wim_port_mappings(wim_id)
        # Port mappings are exposed inside the `config` field of the record
        wim['config'] = utils.merge_dicts(wim.get('config', {}) or {},
                                          wim_port_mapping=mappings)
        return format_out({'wim': wim})

    @route('POST', '/wims')
    def http_create_wim(self):
        """Register a new WIM record"""
        http_content, _ = format_in(wim_schema, confidential_data=True)
        r = utils.remove_extra_items(http_content, wim_schema)
        if r:
            self.logger.debug("Remove extra items received %r", r)
        data = self.engine.create_wim(http_content['wim'])
        # Respond with the freshly created record, as if it had been queried
        return self.http_get_wim('any', data)

    @route('PUT', '/wims/<wim_id>')
    def http_update_wim(self, wim_id):
        '''edit wim details, can use both uuid or name'''
        # parse input data
        http_content, _ = format_in(wim_edit_schema)
        r = utils.remove_extra_items(http_content, wim_edit_schema)
        if r:
            self.logger.debug("Remove received extra items %s", r)

        wim_id = self.engine.update_wim(wim_id, http_content['wim'])
        return self.http_get_wim('any', wim_id)

    @route('DELETE', '/wims/<wim_id>')
    def http_delete_wim(self, wim_id):
        """Delete a wim from a database, can use both uuid or name"""
        data = self.engine.delete_wim(wim_id)
        # TODO Remove WIM in orchestrator
        return format_out({"result": "wim '" + data + "' deleted"})

    @route('POST', '/<tenant_id>/wims/<wim_id>')
    def http_create_wim_account(self, tenant_id, wim_id):
        """Associate an existing wim to this tenant"""
        # parse input data
        http_content, _ = format_in(
            wim_account_schema, confidential_data=True)
        removed = utils.remove_extra_items(http_content, wim_account_schema)
        # Short-circuit logging: only log when something was removed
        removed and self.logger.debug("Remove extra items %r", removed)
        account = self.engine.create_wim_account(
            wim_id, tenant_id, http_content['wim_account'])
        # check update succeeded
        return format_out({"wim_account": account})

    @route('PUT', '/<tenant_id>/wims/<wim_id>')
    def http_update_wim_accounts(self, tenant_id, wim_id):
        """Edit the association of an existing wim to this tenant"""
        tenant_id = None if tenant_id == 'any' else tenant_id
        # parse input data
        http_content, _ = format_in(
            wim_account_schema, confidential_data=True)
        removed = utils.remove_extra_items(http_content, wim_account_schema)
        removed and self.logger.debug("Remove extra items %r", removed)
        accounts = self.engine.update_wim_accounts(
            wim_id, tenant_id, http_content['wim_account'])

        # With a concrete tenant only one account can match; with 'any'
        # several accounts may have been updated
        if tenant_id:
            return format_out({'wim_account': accounts[0]})

        return format_out({'wim_accounts': accounts})

    @route('DELETE', '/<tenant_id>/wims/<wim_id>')
    def http_delete_wim_accounts(self, tenant_id, wim_id):
        """Deassociate an existing wim to this tenant"""
        tenant_id = None if tenant_id == 'any' else tenant_id
        accounts = self.engine.delete_wim_accounts(wim_id, tenant_id,
                                                   error_if_none=True)

        # One (name, wim, tenant) triple per deleted account, for reporting
        properties = (
            (account['name'], wim_id,
             utils.safe_get(account, 'association.nfvo_tenant_id', tenant_id))
            for account in accounts)

        return format_out({
            'result': '\n'.join('WIM account `{}` deleted. '
                                'Tenant `{}` detached from WIM `{}`'
                                .format(*p) for p in properties)
        })

    @route('POST', '/<tenant_id>/wims/<wim_id>/port_mapping')
    def http_create_wim_port_mappings(self, tenant_id, wim_id):
        """Set the wim port mapping for a wim"""
        # parse input data
        http_content, _ = format_in(wim_port_mapping_schema)

        data = self.engine.create_wim_port_mappings(
            wim_id, http_content['wim_port_mapping'], tenant_id)
        return format_out({"wim_port_mapping": data})

    @route('GET', '/<tenant_id>/wims/<wim_id>/port_mapping')
    def http_get_wim_port_mappings(self, tenant_id, wim_id):
        """Get wim port mapping details"""
        # TODO: tenant_id is never used, so it should be removed
        data = self.engine.get_wim_port_mappings(wim_id)
        return format_out({"wim_port_mapping": data})

    @route('DELETE', '/<tenant_id>/wims/<wim_id>/port_mapping')
    def http_delete_wim_port_mappings(self, tenant_id, wim_id):
        """Clean wim port mapping"""
        # TODO: tenant_id is never used, so it should be removed
        data = self.engine.delete_wim_port_mappings(wim_id)
        return format_out({"result": data})
diff --git a/RO/osm_ro/wim/persistence.py b/RO/osm_ro/wim/persistence.py
new file mode 100644 (file)
index 0000000..f0f1ac3
--- /dev/null
@@ -0,0 +1,1007 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+
+"""This module contains only logic related to managing records in a database
+which includes data format normalization, data format validation and etc.
+(It works as an extension to `nfvo_db.py` for the WIM feature)
+
+No domain logic/architectural concern should be present in this file.
+"""
+import json
+import logging
+from contextlib import contextmanager
+from hashlib import sha1
+from itertools import groupby
+from operator import itemgetter
+from sys import exc_info
+# from time import time
+from uuid import uuid1 as generate_uuid
+
+import yaml
+
+from ..utils import (
+    check_valid_uuid,
+    convert_float_timestamp2str,
+    expand_joined_fields,
+    filter_dict_keys,
+    filter_out_dict_keys,
+    merge_dicts,
+    remove_none_items
+)
+from .errors import (
+    DbBaseException,
+    InvalidParameters,
+    MultipleRecordsFound,
+    NoRecordFound,
+    UndefinedUuidOrName,
+    UndefinedWanMappingType,
+    UnexpectedDatabaseError,
+    WimAccountOverwrite,
+    WimAndTenantAlreadyAttached
+)
+
# SQL FROM-clause fragments and SELECT field lists shared by the queries in
# WimPersistence. Keeping them as module constants avoids repeating (and
# mistyping) the join conditions across methods.

# Base table alias for the `wims` table
_WIM = 'wims AS wim '

# `wims` joined with its tenant associations and accounts
_WIM_JOIN = (
    _WIM +
    ' JOIN wim_nfvo_tenants AS association '
    '   ON association.wim_id=wim.uuid '
    ' JOIN nfvo_tenants AS nfvo_tenant '
    '   ON association.nfvo_tenant_id=nfvo_tenant.uuid '
    ' JOIN wim_accounts AS wim_account '
    '   ON association.wim_account_id=wim_account.uuid '
)

# Same join set as _WIM_JOIN but rooted at `wim_accounts`
_WIM_ACCOUNT_JOIN = (
    'wim_accounts AS wim_account '
    ' JOIN wim_nfvo_tenants AS association '
    '   ON association.wim_account_id=wim_account.uuid '
    ' JOIN wims AS wim '
    '   ON association.wim_id=wim.uuid '
    ' JOIN nfvo_tenants AS nfvo_tenant '
    '   ON association.nfvo_tenant_id=nfvo_tenant.uuid '
)

# Datacenters joined with their tenant associations and accounts
_DATACENTER_JOIN = (
    'datacenters AS datacenter '
    ' JOIN tenants_datacenters AS association '
    '   ON association.datacenter_id=datacenter.uuid '
    ' JOIN datacenter_tenants as datacenter_account '
    '   ON association.datacenter_tenant_id=datacenter_account.uuid '
    ' JOIN nfvo_tenants AS nfvo_tenant '
    '   ON association.nfvo_tenant_id=nfvo_tenant.uuid '
)

# Base table alias for the `wim_port_mappings` table
_PORT_MAPPING = 'wim_port_mappings as wim_port_mapping '

# Optional joins to resolve the wim / datacenter side of a port mapping
_PORT_MAPPING_JOIN_WIM = (
    ' JOIN wims as wim '
    '   ON wim_port_mapping.wim_id=wim.uuid '
)

_PORT_MAPPING_JOIN_DATACENTER = (
    ' JOIN datacenters as datacenter '
    '   ON wim_port_mapping.datacenter_id=datacenter.uuid '
)

# Explicitly aliased column list for wim queries (avoids ambiguity in joins)
_WIM_SELECT = [
    'wim.{0} as {0}'.format(_field)
    for _field in 'uuid name description wim_url type config '
                  'created_at modified_at'.split()
]

_WIM_ACCOUNT_SELECT = 'uuid name user password config'.split()

_PORT_MAPPING_SELECT = ('wim_port_mapping.*', )

# Field names obfuscated in query results unless explicitly requested
_CONFIDENTIAL_FIELDS = ('password', 'passwd')

# Fields stored serialized (YAML/JSON) that need (de)serialization on I/O
_SERIALIZED_FIELDS = ('config', 'vim_info', 'wim_info', 'conn_info', 'extra',
                      'wan_service_mapping_info')

UNIQUE_PORT_MAPPING_INFO_FIELDS = {
    'dpid-port': ('wan_switch_dpid', 'wan_switch_port')
}
"""Fields that should be unique for each port mapping that relies on
wan_service_mapping_info.

For example, for port mappings of type 'dpid-port', each combination of
wan_switch_dpid and wan_switch_port should be unique (the same switch cannot
be connected to two different places using the same port)
"""
+
+
+class WimPersistence(object):
+    """High level interactions with the WIM tables in the database"""
+
+    def __init__(self, db, logger=None):
+        self.db = db
+        self.logger = logger or logging.getLogger('openmano.wim.persistence')
+
    def query(self,
              FROM=None,
              SELECT=None,
              WHERE=None,
              ORDER_BY=None,
              LIMIT=None,
              OFFSET=None,
              error_if_none=True,
              error_if_multiple=False,
              postprocess=None,
              hide=_CONFIDENTIAL_FIELDS,
              **kwargs):
        """Retrieve records from the database.

        Keyword Arguments:
            SELECT, FROM, WHERE, LIMIT, ORDER_BY: used to compose the SQL
                query. See ``nfvo_db.get_rows``.
            OFFSET: only valid when used together with LIMIT.
                    Ignore the OFFSET first results of the query.
            error_if_none: by default an error is raised if no record is
                found. With this option it is possible to disable this error.
            error_if_multiple: by default no error is raised if more than one
                record is found.
                With this option it is possible to enable this error.
            postprocess: function applied to every retrieved record.
                This function receives a dict as input and must return it
                after modifications. Moreover this function should accept a
                second optional parameter ``hide`` indicating
                the confidential fields to be obfuscated.
                By default a minimal postprocessing function is applied,
                obfuscating confidential fields and converting timestamps.
            hide: option proxied to postprocess

        All the remaining keyword arguments will be assumed to be ``name``s or
        ``uuid``s to compose the WHERE statement, according to their format.
        If the value corresponds to an array, the first element will determine
        if it is a name or UUID.

        For example:
            - ``wim="abcdef"`` will be turned into ``wim.name="abcdef"``,
            - ``datacenter="5286a274-8a1b-4b8d-a667-9c94261ad855"``
               will be turned into
               ``datacenter.uuid="5286a274-8a1b-4b8d-a667-9c94261ad855"``.
            - ``wim=["5286a274-8a1b-4b8d-a667-9c94261ad855", ...]``
               will be turned into
               ``wim.uuid=["5286a274-8a1b-4b8d-a667-9c94261ad855", ...]``

        Returns:
            list: postprocessed records, with JOIN-prefixed columns expanded
                into nested dicts (see ``expand_joined_fields``).

        Raises:
            NoRecordFound: if the query result set is empty
            MultipleRecordsFound: if ``error_if_multiple`` is enabled and
                more than one record is found
            DbBaseException: errors occurring during the execution of the
                query.
        """
        # Defaults:
        postprocess = postprocess or _postprocess_record
        WHERE = WHERE or {}

        # Find remaining keywords by name or uuid
        WHERE.update(_compose_where_from_uuids_or_names(**kwargs))
        WHERE = WHERE or None
        # ^ If the where statement is empty, it is better to leave it as None,
        #   so it can be filtered out at a later stage
        # OFFSET is folded into the "LIMIT offset,row_count" syntax
        LIMIT = ('{:d},{:d}'.format(OFFSET, LIMIT)
                 if LIMIT and OFFSET else LIMIT)

        query = remove_none_items({
            'SELECT': SELECT, 'FROM': FROM, 'WHERE': WHERE,
            'LIMIT': LIMIT, 'ORDER_BY': ORDER_BY})

        records = self.db.get_rows(**query)

        # First word of the FROM clause is the main table (for error reports)
        table = FROM.split()[0]
        if error_if_none and not records:
            raise NoRecordFound(WHERE, table)

        if error_if_multiple and len(records) > 1:
            self.logger.error('Multiple records '
                              'FROM %s WHERE %s:\n\n%s\n\n',
                              FROM, WHERE, json.dumps(records, indent=4))
            raise MultipleRecordsFound(WHERE, table)

        return [
            expand_joined_fields(postprocess(record, hide))
            for record in records
        ]
+
+    def query_one(self, *args, **kwargs):
+        """Similar to ``query``, but ensuring just one result.
+        ``error_if_multiple`` is enabled by default.
+        """
+        kwargs.setdefault('error_if_multiple', True)
+        records = self.query(*args, **kwargs)
+        return records[0] if records else None
+
+    def get_by_uuid(self, table, uuid, **kwargs):
+        """Retrieve one record from the database based on its uuid
+
+        Arguments:
+            table (str): table name (to be used in SQL's FROM statement).
+            uuid (str): unique identifier for record.
+
+        For additional keyword arguments and exceptions see :obj:`~.query`
+        (``error_if_multiple`` is enabled by default).
+        """
+        if uuid is None:
+            raise UndefinedUuidOrName(table)
+        return self.query_one(table, WHERE={'uuid': uuid}, **kwargs)
+
+    def get_by_name_or_uuid(self, table, uuid_or_name, **kwargs):
+        """Retrieve a record from the database based on a value that can be its
+        uuid or name.
+
+        Arguments:
+            table (str): table name (to be used in SQL's FROM statement).
+            uuid_or_name (str): this value can correspond to either uuid or
+                name
+        For additional keyword arguments and exceptions see :obj:`~.query`
+        (``error_if_multiple`` is enabled by default).
+        """
+        if uuid_or_name is None:
+            raise UndefinedUuidOrName(table)
+
+        key = 'uuid' if check_valid_uuid(uuid_or_name) else 'name'
+        return self.query_one(table, WHERE={key: uuid_or_name}, **kwargs)
+
+    def get_wims(self, uuid_or_name=None, tenant=None, **kwargs):
+        """Retrieve information about one or more WIMs stored in the database
+
+        Arguments:
+            uuid_or_name (str): uuid or name for WIM
+            tenant (str): [optional] uuid or name for NFVO tenant
+
+        See :obj:`~.query` for additional keyword arguments.
+        """
+        kwargs.update(wim=uuid_or_name, tenant=tenant)
+        from_ = _WIM_JOIN if tenant else _WIM
+        select_ = _WIM_SELECT[:] + (['wim_account.*'] if tenant else [])
+
+        kwargs.setdefault('SELECT', select_)
+        return self.query(from_, **kwargs)
+
+    def get_wim(self, wim, tenant=None, **kwargs):
+        """Similar to ``get_wims`` but ensure only one result is returned"""
+        kwargs.setdefault('error_if_multiple', True)
+        return self.get_wims(wim, tenant)[0]
+
+    def create_wim(self, wim_descriptor):
+        """Create a new wim record inside the database and returns its uuid
+
+        Arguments:
+            wim_descriptor (dict): properties of the record
+                (usually each field corresponds to a database column, but extra
+                information can be offloaded to another table or serialized as
+                JSON/YAML)
+        Returns:
+            str: UUID of the created WIM
+        """
+        if "config" in wim_descriptor:
+            wim_descriptor["config"] = _serialize(wim_descriptor["config"])
+
+        return self.db.new_row(
+            "wims", wim_descriptor, add_uuid=True, confidential_data=True)
+
+    def update_wim(self, uuid_or_name, wim_descriptor):
+        """Change an existing WIM record on the database"""
+        # obtain data, check that only one exist
+        wim = self.get_by_name_or_uuid('wims', uuid_or_name)
+
+        # edit data
+        wim_id = wim['uuid']
+        where = {'uuid': wim['uuid']}
+
+        # unserialize config, edit and serialize it again
+        new_config_dict = wim_descriptor.get('config', {}) or {}
+        config_dict = remove_none_items(merge_dicts(
+            wim.get('config', {}) or {}, new_config_dict))
+        wim_descriptor['config'] = (
+            _serialize(config_dict) if config_dict else None)
+
+        self.db.update_rows('wims', wim_descriptor, where)
+
+        return wim_id
+
+    def delete_wim(self, wim):
+        # get nfvo_tenant info
+        wim = self.get_by_name_or_uuid('wims', wim)
+
+        self.db.delete_row_by_id('wims', wim['uuid'])
+
+        return wim['uuid'] + ' ' + wim['name']
+
+    def get_wim_accounts_by(self, wim=None, tenant=None, uuid=None, **kwargs):
+        """Retrieve WIM account information from the database together
+        with the related records (wim, nfvo_tenant and wim_nfvo_tenant)
+
+        Arguments:
+            wim (str): uuid or name for WIM
+            tenant (str): [optional] uuid or name for NFVO tenant
+
+        See :obj:`~.query` for additional keyword arguments.
+        """
+        kwargs.update(wim=wim, tenant=tenant)
+        kwargs.setdefault('postprocess', _postprocess_wim_account)
+        if uuid:
+            kwargs.setdefault('WHERE', {'wim_account.uuid': uuid})
+        return self.query(FROM=_WIM_ACCOUNT_JOIN, **kwargs)
+
+    def get_wim_account_by(self, wim=None, tenant=None, uuid=None, **kwargs):
+        """Similar to ``get_wim_accounts_by``, but ensuring just one result"""
+        kwargs.setdefault('error_if_multiple', True)
+        return self.get_wim_accounts_by(wim, tenant, uuid, **kwargs)[0]
+
+    def get_wim_accounts(self, **kwargs):
+        """Retrieve all the accounts from the database"""
+        kwargs.setdefault('postprocess', _postprocess_wim_account)
+        return self.query(FROM=_WIM_ACCOUNT_JOIN, **kwargs)
+
+    def get_wim_account(self, uuid_or_name, **kwargs):
+        """Retrieve WIM Account record by UUID or name,
+        See :obj:`get_by_name_or_uuid` for keyword arguments.
+        """
+        kwargs.setdefault('postprocess', _postprocess_wim_account)
+        kwargs.setdefault('SELECT', _WIM_ACCOUNT_SELECT)
+        return self.get_by_name_or_uuid('wim_accounts', uuid_or_name, **kwargs)
+
    @contextmanager
    def _associate(self, wim_id, nfvo_tenant_id):
        """Auxiliary context manager for ``create_wim_account``.

        The caller performs the actual insertion in the association table
        ``wim_nfvo_tenants`` inside the ``with`` block; this manager only
        translates the low-level "duplicated entry" database error into the
        domain-specific ``WimAndTenantAlreadyAttached`` exception.
        """
        try:
            yield
        except DbBaseException as db_exception:
            error_msg = str(db_exception)
            # The duplicated-key violation is recognized by both fragments
            # being present in the database error message
            if all([msg in error_msg
                    for msg in ("already in use", "'wim_nfvo_tenant'")]):
                ex = WimAndTenantAlreadyAttached(wim_id, nfvo_tenant_id)
                raise ex from db_exception
            # Any other database error is propagated untouched
            raise
+
    def create_wim_account(self, wim, tenant, properties):
        """Associate a wim to a tenant using the ``wim_nfvo_tenants`` table
        and create a ``wim_account`` to store credentials and configurations.

        For the sake of simplification, we assume that each NFVO tenant can be
        attached to a WIM using only one WIM account. This is automatically
        guaranteed via database constraints.
        For corner cases, the same WIM can be registered twice using another
        name.

        Arguments:
            wim (str): name or uuid of the WIM related to the account being
                created
            tenant (str): name or uuid of the nfvo tenant to which the account
                will be created
            properties (dict): properties of the account
                (eg. user, password, ...)

        Returns:
            str: uuid of the (newly created or reused) WIM account

        Raises:
            WimAccountOverwrite: when an account with the same name exists
                but with different properties
            WimAndTenantAlreadyAttached: when the tenant is already attached
                to the WIM
        """
        wim_id = self.get_by_name_or_uuid('wims', wim, SELECT=['uuid'])['uuid']
        tenant = self.get_by_name_or_uuid('nfvo_tenants', tenant,
                                          SELECT=['uuid', 'name'])
        # When no account name is given, the tenant name is used by default
        account = properties.setdefault('name', tenant['name'])

        wim_account = self.query_one('wim_accounts',
                                     WHERE={'wim_id': wim_id, 'name': account},
                                     error_if_none=False)

        transaction = []
        used_uuids = []

        if wim_account is None:
            # If a row for the wim account doesn't exist yet, we need to
            # create one, otherwise we can just re-use it.
            account_id = str(generate_uuid())
            used_uuids.append(account_id)
            row = merge_dicts(properties, wim_id=wim_id, uuid=account_id)
            transaction.append({'wim_accounts': _preprocess_wim_account(row)})
        else:
            account_id = wim_account['uuid']
            properties.pop('config', None)  # Config is too complex to compare
            diff = {k: v for k, v in properties.items() if v != wim_account[k]}
            if diff:
                tip = 'Edit the account first, and then attach it to a tenant'
                raise WimAccountOverwrite(wim_account, diff, tip)

        transaction.append({
            'wim_nfvo_tenants': {'nfvo_tenant_id': tenant['uuid'],
                                 'wim_id': wim_id,
                                 'wim_account_id': account_id}})

        # _associate converts duplicated-entry database errors into
        # WimAndTenantAlreadyAttached
        with self._associate(wim_id, tenant['uuid']):
            self.db.new_rows(transaction, used_uuids, confidential_data=True)

        return account_id
+
+    def update_wim_account(self, uuid, properties, hide=_CONFIDENTIAL_FIELDS):
+        """Update WIM account record by overwriting fields with new values
+
+        Specially for the field ``config`` this means that a new dict will be
+        merged to the existing one.
+
+        Attributes:
+            uuid (str): UUID for the WIM account
+            properties (dict): fields that should be overwritten
+
+        Returns:
+            Updated wim_account
+        """
+        wim_account = self.get_by_uuid('wim_accounts', uuid)
+        safe_fields = 'user password name created'.split()
+        updates = _preprocess_wim_account(
+            merge_dicts(wim_account, filter_dict_keys(properties, safe_fields))
+        )
+
+        if properties.get('config'):
+            old_config = wim_account.get('config') or {}
+            new_config = merge_dicts(old_config, properties['config'])
+            updates['config'] = _serialize(new_config)
+
+        num_changes = self.db.update_rows('wim_accounts', UPDATE=updates,
+                                          WHERE={'uuid': wim_account['uuid']})
+
+        if num_changes is None:
+            raise UnexpectedDatabaseError('Impossible to update wim_account '
+                                          '{name}:{uuid}'.format(*wim_account))
+
+        return self.get_wim_account(wim_account['uuid'], hide=hide)
+
+    def delete_wim_account(self, uuid):
+        """Remove WIM account record from the database"""
+        # Since we have foreign keys configured with ON CASCADE, we can rely
+        # on the database engine to guarantee consistency, deleting the
+        # dependant records
+        return self.db.delete_row_by_id('wim_accounts', uuid)
+
+    def get_datacenters_by(self, datacenter=None, tenant=None, **kwargs):
+        """Retrieve datacenter information from the database together
+        with the related records (nfvo_tenant)
+
+        Arguments:
+            datacenter (str): uuid or name for datacenter
+            tenant (str): [optional] uuid or name for NFVO tenant
+
+        See :obj:`~.query` for additional keyword arguments.
+        """
+        if tenant:
+            kwargs.update(datacenter=datacenter, tenant=tenant)
+            return self.query(_DATACENTER_JOIN, **kwargs)
+        else:
+            return [self.get_by_name_or_uuid('datacenters',
+                                             datacenter, **kwargs)]
+
+    def get_datacenter_by(self, datacenter=None, tenant=None, **kwargs):
+        """Similar to ``get_datacenters_by``, but ensuring just one result"""
+        kwargs.setdefault('error_if_multiple', True)
+        return self.get_datacenters_by(datacenter, tenant, **kwargs)[0]
+
+    def _create_single_port_mapping(self, properties):
+        info = properties.setdefault('wan_service_mapping_info', {})
+        endpoint_id = properties.get('wan_service_endpoint_id')
+
+        if info.get('mapping_type') and not endpoint_id:
+            properties['wan_service_endpoint_id'] = (
+                self._generate_port_mapping_id(info))
+
+        properties['wan_service_mapping_info'] = _serialize(info)
+
+        try:
+            self.db.new_row('wim_port_mappings', properties,
+                            add_uuid=False, confidential_data=True)
+        except DbBaseException as old_exception:
+            self.logger.exception(old_exception)
+            ex = InvalidParameters(
+                "The mapping must contain the "
+                "'pop_switch_dpid', 'pop_switch_port',  and "
+                "wan_service_mapping_info: "
+                "('wan_switch_dpid' and 'wan_switch_port') or "
+                "'wan_service_endpoint_id}'")
+            raise ex from old_exception
+
+        return properties
+
+    def create_wim_port_mappings(self, wim, port_mappings, tenant=None):
+        if not isinstance(wim, dict):
+            wim = self.get_by_name_or_uuid('wims', wim)
+
+        for port_mapping in port_mappings:
+            port_mapping['wim_name'] = wim['name']
+            datacenter = self.get_datacenter_by(
+                port_mapping['datacenter_name'], tenant)
+            for pop_wan_port_mapping in port_mapping['pop_wan_mappings']:
+                element = merge_dicts(pop_wan_port_mapping, {
+                    'wim_id': wim['uuid'],
+                    'datacenter_id': datacenter['uuid']})
+                self._create_single_port_mapping(element)
+
+        return port_mappings
+
    def _filter_port_mappings_by_tenant(self, mappings, tenant):
        """Make sure all the datacenters and wims listed in the port mapping
        belong to an specific tenant

        Arguments:
            mappings (list): ``wim_port_mapping`` records (dicts)
            tenant (str): uuid or name of the NFVO tenant

        Returns:
            list: the subset of ``mappings`` whose datacenter AND wim are
                both visible to ``tenant``.
        """

        # NOTE: Theoretically this could be done at SQL level, but given the
        #       number of tables involved (wim_port_mappings, wim_accounts,
        #       wims, wim_nfvo_tenants, datacenters, datacenter_tenants,
        #       tenants_datacents and nfvo_tenants), it would result in a
        #       extremely complex query. Moreover, the predicate can vary:
        #       for `get_wim_port_mappings` we can have any combination of
        #       (wim, datacenter, tenant), not all of them having the 3 values
        #       so we have combinatorial trouble to write the 'FROM' statement.

        kwargs = {'tenant': tenant, 'error_if_none': False}
        # Cache results to speedup things
        datacenters = {}
        wims = {}

        def _get_datacenter(uuid):
            # NOTE(review): an empty query result ([]) is falsy, so it gets
            # cached but re-queried on the next call -- just a missed cache
            # hit, the returned value is still correct.
            return (
                datacenters.get(uuid) or
                datacenters.setdefault(
                    uuid, self.get_datacenters_by(uuid, **kwargs)))

        def _get_wims(uuid):
            return (wims.get(uuid) or
                    wims.setdefault(uuid, self.get_wims(uuid, **kwargs)))

        # Keep only the mappings for which both lookups return something
        return [
            mapping
            for mapping in mappings
            if (_get_datacenter(mapping['datacenter_id']) and
                _get_wims(mapping['wim_id']))
        ]
+
    def get_wim_port_mappings(self, wim=None, datacenter=None, tenant=None,
                              **kwargs):
        """List all the port mappings, optionally filtering by wim, datacenter
        AND/OR tenant

        Arguments:
            wim (str): [optional] uuid or name of a WIM
            datacenter (str): [optional] uuid or name of a datacenter
            tenant (str): [optional] uuid or name of an NFVO tenant

        Returns:
            list: one dict per (wim_id, datacenter_id) pair, with the
                individual mappings grouped under 'pop_wan_mappings'.
        """
        # Only add the JOINs required by the requested filters
        from_ = [_PORT_MAPPING,
                 _PORT_MAPPING_JOIN_WIM if wim else '',
                 _PORT_MAPPING_JOIN_DATACENTER if datacenter else '']

        criteria = ('wim_id', 'datacenter_id')
        kwargs.setdefault('error_if_none', False)
        mappings = self.query(
            ' '.join(from_),
            SELECT=_PORT_MAPPING_SELECT,
            ORDER_BY=['wim_port_mapping.{}'.format(c) for c in criteria],
            wim=wim, datacenter=datacenter,
            postprocess=_postprocess_wim_port_mapping,
            **kwargs)

        # The tenant filter is applied in python (see the NOTE inside
        # _filter_port_mappings_by_tenant about why not in SQL)
        if tenant:
            mappings = self._filter_port_mappings_by_tenant(mappings, tenant)

        # We don't have to sort, since we have used 'ORDER_BY'
        grouped_mappings = groupby(mappings, key=itemgetter(*criteria))

        return [
            {'wim_id': key[0],
             'datacenter_id': key[1],
             'pop_wan_mappings': [
                 filter_out_dict_keys(mapping, (
                     'id', 'wim_id', 'datacenter_id',
                     'created_at', 'modified_at'))
                 for mapping in group]}
            for key, group in grouped_mappings
        ]
+
+    def delete_wim_port_mappings(self, wim_id):
+        self.db.delete_row(FROM='wim_port_mappings', WHERE={"wim_id": wim_id})
+        return "port mapping for wim {} deleted.".format(wim_id)
+
    def update_wim_port_mapping(self, id, properties):
        """Overwrite fields of an existing ``wim_port_mappings`` record,
        merging the ``wan_service_mapping_info`` dicts instead of replacing
        them.

        Arguments:
            id: primary key of the port mapping record
            properties (dict): fields to overwrite (None values are dropped)

        Returns:
            Number of affected rows.

        Raises:
            UnexpectedDatabaseError: when the update statement fails
        """
        original = self.query_one('wim_port_mappings', WHERE={'id': id})

        # Merge the stored mapping info with the incoming one, at dict level
        mapping_info = remove_none_items(merge_dicts(
            original.get('wan_service_mapping_info') or {},
            properties.get('wan_service_mapping_info') or {}))

        updates = preprocess_record(
            merge_dicts(original, remove_none_items(properties),
                        wan_service_mapping_info=mapping_info))

        num_changes = self.db.update_rows('wim_port_mappings',
                                          UPDATE=updates, WHERE={'id': id})

        if num_changes is None:
            raise UnexpectedDatabaseError(
                'Impossible to update wim_port_mappings {}:\n{}\n'.format(
                    id, _serialize(properties))
            )

        return num_changes
+
    def get_actions_in_groups(self, wim_account_id,
                              item_types=('instance_wim_nets',),
                              group_offset=0, group_limit=150):
        """Retrieve actions from the database in groups.
        Each group contains all the actions that have the same ``item`` type
        and ``item_id``.

        Arguments:
            wim_account_id: restrict the search to actions to be performed
                using the same account
            item_types (list): [optional] filter the actions to the given
                item types
            group_limit (int): maximum number of groups returned by the
                function
            group_offset (int): skip the N first groups. Used together with
                group_limit for pagination purposes.

        Returns:
            List of groups, where each group is a tuple ``(key, actions)``.
            In turn, ``key`` is a tuple containing the values of
            ``(item, item_id)`` used to create the group and ``actions`` is a
            list of ``vim_wim_actions`` records (dicts).
        """

        # Quote and escape each item type so it can be inlined in the SQL
        type_options = set(
            '"{}"'.format(self.db.escape_string(t)) for t in item_types)

        # Inner query: the distinct (item, item_id) groups, paginated
        items = ('SELECT DISTINCT a.item, a.item_id, a.wim_account_id '
                 'FROM vim_wim_actions AS a '
                 'WHERE a.wim_account_id="{}" AND a.item IN ({}) '
                 'ORDER BY a.item, a.item_id '
                 'LIMIT {:d},{:d}').format(
                     self.safe_str(wim_account_id),
                     ','.join(type_options),
                     group_offset, group_limit)

        # Outer query: every action belonging to one of the selected groups
        join = 'vim_wim_actions NATURAL JOIN ({}) AS items'.format(items)
        db_results = self.db.get_rows(
            FROM=join, ORDER_BY=('item', 'item_id', 'created_at'))

        # Rows arrive sorted by the grouping key, as groupby requires
        results = (_postprocess_action(r) for r in db_results)
        criteria = itemgetter('item', 'item_id')
        return [(k, list(g)) for k, g in groupby(results, key=criteria)]
+
+    def update_action(self, instance_action_id, task_index, properties):
+        condition = {'instance_action_id': instance_action_id,
+                     'task_index': task_index}
+        try:
+            action = self.query_one('vim_wim_actions', WHERE=condition)
+        except Exception:
+            actions = self.query('vim_wim_actions', WHERE=condition)
+            self.logger.error('More then one action found:\n%s',
+                              json.dumps(actions, indent=4))
+            action = actions[0]
+
+        extra = remove_none_items(merge_dicts(
+            action.get('extra') or {},
+            properties.get('extra') or {}))
+
+        updates = preprocess_record(
+            merge_dicts(action, properties, extra=extra))
+
+        num_changes = self.db.update_rows('vim_wim_actions', UPDATE=updates, WHERE=condition)
+
+        if num_changes is None:
+            raise UnexpectedDatabaseError(
+                'Impossible to update vim_wim_actions '
+                '{instance_action_id}[{task_index}]'.format(*action))
+
+        return num_changes
+
+    def get_wan_links(self, uuid=None, **kwargs):
+        """Retrieve WAN link records from the database
+
+        Keyword Arguments:
+            uuid, instance_scenario_id, sce_net_id, wim_id, wim_account_id:
+                attributes that can be used at the WHERE clause
+        """
+        kwargs.setdefault('uuid', uuid)
+        kwargs.setdefault('error_if_none', False)
+
+        criteria_fields = ('uuid', 'instance_scenario_id', 'sce_net_id',
+                           'wim_id', 'wim_account_id')
+        criteria = remove_none_items(filter_dict_keys(kwargs, criteria_fields))
+        kwargs = filter_out_dict_keys(kwargs, criteria_fields)
+
+        return self.query('instance_wim_nets', WHERE=criteria, **kwargs)
+
+    def update_wan_link(self, uuid, properties):
+        wan_link = self.get_by_uuid('instance_wim_nets', uuid)
+
+        wim_info = remove_none_items(merge_dicts(
+            wan_link.get('wim_info') or {},
+            properties.get('wim_info') or {}))
+
+        updates = preprocess_record(
+            merge_dicts(wan_link, properties, wim_info=wim_info))
+
+        self.logger.debug({'UPDATE': updates})
+        num_changes = self.db.update_rows(
+            'instance_wim_nets', UPDATE=updates,
+            WHERE={'uuid': wan_link['uuid']})
+
+        if num_changes is None:
+            raise UnexpectedDatabaseError(
+                'Impossible to update instance_wim_nets ' + wan_link['uuid'])
+
+        return num_changes
+
+    def get_instance_nets(self, instance_scenario_id, sce_net_id, **kwargs):
+        """Retrieve all the instance nets related to the same instance_scenario
+        and scenario network
+        """
+        return self.query(
+            'instance_nets',
+            WHERE={'instance_scenario_id': instance_scenario_id,
+                   'sce_net_id': sce_net_id},
+            ORDER_BY=kwargs.pop(
+                'ORDER_BY', ('instance_scenario_id', 'sce_net_id')),
+            **kwargs)
+
    def update_instance_action_counters(self, uuid, failed=None, done=None):
        """Atomically increment/decrement number_done and number_failed fields
        in the instance action table

        Arguments:
            uuid (str): uuid of the ``instance_actions`` record
            failed (int): delta for ``number_failed`` (None to leave it)
            done (int): delta for ``number_done`` (None to leave it)

        Returns:
            Number of updated rows (0 when there was nothing to change).
        """
        # NOTE(review): a 0 delta evaluates to 0 here (not None), so it is
        # NOT filtered by remove_none_items and would be sent as a plain 0
        # instead of an INCREMENT dict -- confirm callers never pass 0.
        changes = remove_none_items({
            'number_failed': failed and {'INCREMENT': failed},
            'number_done': done and {'INCREMENT': done}
        })

        if not changes:
            return 0

        return self.db.update_rows('instance_actions', WHERE={'uuid': uuid}, UPDATE=changes)
+
    def get_only_vm_with_external_net(self, instance_net_id, **kwargs):
        """Return an instance VM if that is the only VM connected to an
        external network identified by instance_net_id

        Arguments:
            instance_net_id (str): uuid of the instance network

        For additional keyword arguments and exceptions see
        :obj:`~.query_one`.
        """
        # Inner query: the net, but only when it has exactly one 'external'
        # interface attached
        counting = ('SELECT DISTINCT instance_net_id '
                    'FROM instance_interfaces '
                    'WHERE instance_net_id="{}" AND type="external" '
                    'GROUP BY instance_net_id '
                    'HAVING COUNT(*)=1').format(self.safe_str(instance_net_id))

        # The VM owning that single external interface
        vm_item = ('SELECT DISTINCT instance_vm_id '
                   'FROM instance_interfaces NATURAL JOIN ({}) AS a'
                   .format(counting))

        # Finally resolve the full instance_vms record
        return self.query_one(
            'instance_vms JOIN ({}) as instance_interface '
            'ON instance_vms.uuid=instance_interface.instance_vm_id'
            .format(vm_item), **kwargs)
+
+    def safe_str(self, string):
+        """Return a SQL safe string"""
+        return self.db.escape_string(string)
+
+    def reconnect(self):
+        self.db.reconnect()
+
+    def _generate_port_mapping_id(self, mapping_info):
+        """Given a port mapping represented by a dict with a 'type' field,
+        generate a unique string, in a injective way.
+        """
+        mapping_info = mapping_info.copy()  # Avoid mutating original object
+        mapping_type = mapping_info.pop('mapping_type', None)
+        if not mapping_type:
+            raise UndefinedWanMappingType(mapping_info)
+
+        unique_fields = UNIQUE_PORT_MAPPING_INFO_FIELDS.get(mapping_type)
+
+        if unique_fields:
+            mapping_info = filter_dict_keys(mapping_info, unique_fields)
+        else:
+            self.logger.warning('Unique fields for WIM port mapping of type '
+                                '%s not defined. Please add a list of fields '
+                                'which combination should be unique in '
+                                'UNIQUE_PORT_MAPPING_INFO_FIELDS '
+                                '(`wim/persistency.py) ', mapping_type)
+
+        repeatable_repr = json.dumps(mapping_info, encoding='utf-8',
+                                     sort_keys=True, indent=False)
+
+        return ':'.join([mapping_type, _str2id(repeatable_repr)])
+
+
def _serialize(data):
    """Serialize an arbitrary value in a consistent way,
    so it can be stored in a database inside a text field

    Inline (flow-style) YAML is used to keep the stored text compact.
    """
    return yaml.safe_dump(data, default_flow_style=True, width=256)
+
+
def _unserialize(text):
    """Unserialize a text representation into an arbitrary value,
    so it can be loaded back from the database (inverse of ``_serialize``).
    """
    return yaml.safe_load(text)
+
+
def preprocess_record(record):
    """Small transformations to be applied to the data that comes from the
    user before writing it to the database. By default, filter out timestamps,
    and serialize the ``config`` field.
    """
    # Timestamp columns are managed by the database itself
    timestamp_fields = ['created_at', 'modified_at']
    return serialize_fields(filter_out_dict_keys(record, timestamp_fields))
+
+
def _preprocess_wim_account(wim_account):
    """Do the default preprocessing and convert the 'created' field from
    boolean to string
    """
    wim_account = preprocess_record(wim_account)

    # The database column stores 'true'/'false' strings
    created = wim_account.get('created')
    if created is True or created == 'true':
        wim_account['created'] = 'true'
    else:
        wim_account['created'] = 'false'

    return wim_account
+
+
def _postprocess_record(record, hide=_CONFIDENTIAL_FIELDS):
    """By default, hide passwords fields, unserialize ``config`` fields, and
    convert float timestamps to strings
    """
    # Obfuscate first, then unserialize the YAML-stored fields
    record = unserialize_fields(hide_confidential_fields(record, hide), hide)

    # Timestamps are converted in place
    convert_float_timestamp2str(record)

    return record
+
+
def _postprocess_action(action):
    """Unserialize the ``extra`` field of an action record, when present."""
    extra = action.get('extra')
    if extra:
        action['extra'] = _unserialize(extra)
    return action
+
+
def _postprocess_wim_account(wim_account, hide=_CONFIDENTIAL_FIELDS):
    """Do the default postprocessing and convert the 'created' field from
    string to boolean

    Columns pulled in by the SQL JOINs are re-prefixed with the name of
    their original table ('wim.' / 'association.') to avoid clashes.
    """
    for wim_field in ('type', 'description', 'wim_url'):
        if wim_field in wim_account:
            wim_account['wim.' + wim_field] = wim_account.pop(wim_field)

    for assoc_field in ('id', 'nfvo_tenant_id', 'wim_account_id'):
        if assoc_field in wim_account:
            wim_account['association.' + assoc_field] = (
                wim_account.pop(assoc_field))

    wim_account = _postprocess_record(wim_account, hide)

    # The database stores 'true'/'false' strings; expose a real boolean
    created = wim_account.get('created')
    wim_account['created'] = (created is True or created == 'true')

    return wim_account
+
+
def _postprocess_wim_port_mapping(mapping, hide=_CONFIDENTIAL_FIELDS):
    """Default record postprocessing, additionally guaranteeing that
    ``wan_service_mapping_info`` is always a dict (never None/missing).
    """
    mapping = _postprocess_record(mapping, hide=hide)
    mapping['wan_service_mapping_info'] = (
        mapping.get('wan_service_mapping_info', None) or {})
    return mapping
+
+
def hide_confidential_fields(record, fields=_CONFIDENTIAL_FIELDS):
    """Obfuscate confidential fields from the input dict.

    A key is considered confidential when it equals one of ``fields`` or
    ends with '.<field>' (JOIN-prefixed columns).

    Note:
        This function performs a SHALLOW operation.
    """
    if not (isinstance(record, dict) and fields):
        return record

    def _is_confidential(key):
        return any(key == f or key.endswith('.' + f) for f in fields)

    masked = {k: '********'
              for k in list(record.keys())
              if _is_confidential(k) and record[k]}

    return merge_dicts(record, masked)
+
+
def unserialize_fields(record, hide=_CONFIDENTIAL_FIELDS,
                       fields=_SERIALIZED_FIELDS):
    """Unserialize fields that were stored in the database as serialized
    YAML (or JSON), obfuscating any confidential sub-fields on the way.
    """
    def _is_serialized(key):
        return any(key == f or key.endswith('.' + f) for f in fields)

    replacements = {
        key: hide_confidential_fields(_unserialize(record[key]), hide)
        for key in list(record.keys())
        if _is_serialized(key) and record[key]
    }

    return merge_dicts(record, replacements)
+
+
def serialize_fields(record, fields=_SERIALIZED_FIELDS):
    """Serialize fields to be stored in the database as YAML"""
    def _is_serialized(key):
        return any(key == f or key.endswith('.' + f) for f in fields)

    # None values are kept untouched (they map to SQL NULL)
    return merge_dicts(record, {
        key: _serialize(record[key])
        for key in list(record.keys())
        if _is_serialized(key) and record[key] is not None
    })
+
+
def _decide_name_or_uuid(value):
    """Return 'uuid' or 'name' depending on the format of ``value``.

    For lists/tuples the first element decides; empty sequences fall back
    to 'name'.
    """
    sample = value
    if isinstance(value, (list, tuple)):
        sample = value[0] if value else ''

    return 'uuid' if check_valid_uuid(sample) else 'name'
+
+
def _compose_where_from_uuids_or_names(**conditions):
    """Create a dict containing the right conditions to be used in a database
    query.

    This function chooses between ``name`` and ``uuid`` fields based on the
    format of the passed string.
    If a list is passed, the first element of the list will be used to choose
    the name of the field.
    If a ``None`` value is passed, ``uuid`` is used.

    Note that this function automatically translates ``tenant`` to
    ``nfvo_tenant`` for the sake of brevity.

    Example:
        >>> _compose_where_from_uuids_or_names(
                wim='abcdef',
                tenant=['xyz123', 'def456'],
                datacenter='5286a274-8a1b-4b8d-a667-9c94261ad855')
        {'wim.name': 'abcdef',
         'nfvo_tenant.name': ['xyz123', 'def456'],
         'datacenter.uuid': '5286a274-8a1b-4b8d-a667-9c94261ad855'}
    """
    if 'tenant' in conditions:
        conditions['nfvo_tenant'] = conditions.pop('tenant')

    where = {}
    for kind, value in conditions.items():
        # Falsy values (None, '', empty list) are simply skipped
        if not value:
            continue
        column = _decide_name_or_uuid(value)
        where['{}.{}'.format(kind, column)] = value

    return where
+
+
+def _str2id(text):
+    """Create an ID (following the UUID format) from a piece of arbitrary
+    text.
+
+    Different texts should generate different IDs, and the same text should
+    generate the same ID in a repeatable way.
+    """
+    return sha1(text).hexdigest()
diff --git a/RO/osm_ro/wim/schemas.py b/RO/osm_ro/wim/schemas.py
new file mode 100644 (file)
index 0000000..101bcb1
--- /dev/null
@@ -0,0 +1,178 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+
+from ..openmano_schemas import (
+    description_schema,
+    name_schema,
+    nameshort_schema
+)
+
+# WIM -------------------------------------------------------------------------
+# Supported WIM connector types
+wim_types = ["tapi", "onos", "odl", "dynpac", "fake"]
+
+# Datapath identifier: one or more alphanumeric groups separated by colons,
+# e.g. "00:00:ab:cd"
+dpid_type = {
+    "type": "string",
+    "pattern":
+        "^[0-9a-zA-Z]+(:[0-9a-zA-Z]+)*$"
+}
+
+# A switch port: either a short (1-5 char) string or an integer in 1..65534
+port_type = {
+    "oneOf": [
+        {"type": "string",
+         "minLength": 1,
+         "maxLength": 5},
+        {"type": "integer",
+         "minimum": 1,
+         "maximum": 65534}
+    ]
+}
+
+# Port-mapping description: for each datacenter, a list of correspondences
+# between a POP switch/port and a WAN service endpoint (either given
+# directly via ``wan_service_endpoint_id`` or derivable from
+# ``wan_service_mapping_info`` - hence the ``anyOf`` below)
+wim_port_mapping_desc = {
+    "type": "array",
+    "items": {
+        "type": "object",
+        "properties": {
+            "datacenter_name": nameshort_schema,
+            "pop_wan_mappings": {
+                "type": "array",
+                "items": {
+                    "type": "object",
+                    "properties": {
+                        "pop_switch_dpid": dpid_type,
+                        "pop_switch_port": port_type,
+                        "wan_service_endpoint_id": name_schema,
+                        "wan_service_mapping_info": {
+                            "type": "object",
+                            "properties": {
+                                "mapping_type": name_schema,
+                                "wan_switch_dpid": dpid_type,
+                                "wan_switch_port": port_type
+                            },
+                            "additionalProperties": True,
+                            "required": ["mapping_type"]
+                        }
+                    },
+                    "anyOf": [
+                        {
+                            "required": [
+                                "pop_switch_dpid",
+                                "pop_switch_port",
+                                "wan_service_endpoint_id"
+                            ]
+                        },
+                        {
+                            "required": [
+                                "pop_switch_dpid",
+                                "pop_switch_port",
+                                "wan_service_mapping_info"
+                            ]
+                        }
+                    ]
+                }
+            }
+        },
+        "required": ["datacenter_name", "pop_wan_mappings"]
+    }
+}
+
+wim_schema_properties = {
+    "name": name_schema,
+    "description": description_schema,
+    "type": {
+        "type": "string",
+        "enum": ["tapi", "onos", "odl", "dynpac", "fake"]
+    },
+    "wim_url": description_schema,
+    "config": {
+        "type": "object",
+        "properties": {
+            "wim_port_mapping": wim_port_mapping_desc
+        }
+    }
+}
+
+# Payload schema for WIM creation (name, type and wim_url are mandatory)
+wim_schema = {
+    "title": "wim information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type": "object",
+    "properties": {
+        "wim": {
+            "type": "object",
+            "properties": wim_schema_properties,
+            "required": ["name", "type", "wim_url"],
+        }
+    },
+    "required": ["wim"],
+}
+
+# Payload schema for WIM edition: same properties, nothing mandatory
+wim_edit_schema = {
+    "title": "wim edit information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type": "object",
+    "properties": {
+        "wim": {
+            "type": "object",
+            "properties": wim_schema_properties,
+        }
+    },
+    "required": ["wim"],
+}
+
+# Payload schema for a WIM account (credentials + free-form config)
+wim_account_schema = {
+    "title": "wim account information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type": "object",
+    "properties": {
+        "wim_account": {
+            "type": "object",
+            "properties": {
+                "name": name_schema,
+                "user": nameshort_schema,
+                "password": nameshort_schema,
+                "config": {"type": "object"}
+            },
+        }
+    },
+    "required": ["wim_account"],
+}
+
+# Payload schema for attaching/updating the WIM<->datacenter port mapping
+wim_port_mapping_schema = {
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "title": "wim mapping information schema",
+    "type": "object",
+    "properties": {
+        "wim_port_mapping": wim_port_mapping_desc
+    },
+    "required": ["wim_port_mapping"]
+}
diff --git a/RO/osm_ro/wim/tests/__init__.py b/RO/osm_ro/wim/tests/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/RO/osm_ro/wim/tests/fixtures.py b/RO/osm_ro/wim/tests/fixtures.py
new file mode 100644 (file)
index 0000000..8984020
--- /dev/null
@@ -0,0 +1,326 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+# pylint: disable=W0621
+
+from __future__ import unicode_literals
+
+import json
+from time import time
+from textwrap import wrap
+
+from ...tests.db_helpers import uuid, sha1
+
+NUM_WIMS = 3
+NUM_TENANTS = 2
+NUM_DATACENTERS = 2
+
+
+# In the following functions, the identifiers should be simple integers
+
+
+def wim(identifier=0):
+    return {'name': 'wim%d' % identifier,
+            'uuid': uuid('wim%d' % identifier),
+            'wim_url': 'localhost',
+            'type': 'tapi'}
+
+
+def tenant(identifier=0):
+    return {'name': 'tenant%d' % identifier,
+            'uuid': uuid('tenant%d' % identifier)}
+
+
+def wim_account(wim, tenant):
+    return {'name': 'wim-account%d%d' % (tenant, wim),
+            'uuid': uuid('wim-account%d%d' % (tenant, wim)),
+            'user': 'user%d%d' % (tenant, wim),
+            'password': 'password%d%d' % (tenant, wim),
+            'wim_id': uuid('wim%d' % wim),
+            'created': 'true'}
+
+
+def wim_tenant_association(wim, tenant):
+    return {'nfvo_tenant_id': uuid('tenant%d' % tenant),
+            'wim_id': uuid('wim%d' % wim),
+            'wim_account_id': uuid('wim-account%d%d' % (tenant, wim))}
+
+
+def wim_set(identifier=0, tenant=0):
+    """Records necessary to create a WIM and connect it to a tenant.
+
+    Returns a list of single-key dicts (table name -> list of rows), in the
+    shape expected by ``populate`` in the DB test helpers.
+    """
+    return [
+        {'wims': [wim(identifier)]},
+        {'wim_accounts': [wim_account(identifier, tenant)]},
+        {'wim_nfvo_tenants': [wim_tenant_association(identifier, tenant)]}
+    ]
+
+
+def _datacenter_to_switch_port(dc_id, port=None):
+    digits = 16
+    switch = ':'.join(wrap(('%0' + str(digits) + 'x') % int(dc_id), 2))
+    return (switch, str((port or int(dc_id)) + 1))
+
+
+def datacenter(identifier, external_ports_config=False):
+    """Produce a ``datacenters`` record derived from ``identifier``.
+
+    When ``external_ports_config`` is True, the ``config`` field carries a
+    JSON document describing one external vlan connection whose switch/port
+    pair comes from ``_datacenter_to_switch_port``.
+    """
+    config = '' if not external_ports_config else json.dumps({
+        'external_connections': [
+            {'condition': {
+                'provider:physical_network': 'provider',
+                'encapsulation_type': 'vlan'},
+                'vim_external_port':
+                dict(zip(('switch', 'port'),
+                          _datacenter_to_switch_port(identifier)))}
+        ]})
+
+    return {'uuid': uuid('dc%d' % identifier),
+            'name': 'dc%d' % identifier,
+            'type': 'openvim',
+            'vim_url': 'localhost',
+            'config': config}
+
+
+def datacenter_account(datacenter, tenant):
+    return {'name': 'dc-account%d%d' % (tenant, datacenter),
+            'uuid': uuid('dc-account%d%d' % (tenant, datacenter)),
+            'datacenter_id': uuid('dc%d' % datacenter),
+            'created': 'true'}
+
+
+def datacenter_tenant_association(datacenter, tenant):
+    return {'nfvo_tenant_id': uuid('tenant%d' % tenant),
+            'datacenter_id': uuid('dc%d' % datacenter),
+            'datacenter_tenant_id': uuid('dc-account%d%d' % (tenant, datacenter))}
+
+
+def datacenter_set(identifier=0, tenant=0):
+    """Records necessary to create a datacenter and connect it to a tenant.
+
+    Returns a list of single-key dicts (table name -> list of rows), the
+    same shape as ``wim_set``.
+    """
+    return [
+        {'datacenters': [datacenter(identifier)]},
+        {'datacenter_tenants': [datacenter_account(identifier, tenant)]},
+        {'tenants_datacenters': [
+            datacenter_tenant_association(identifier, tenant)
+        ]}
+    ]
+
+
+def wim_port_mapping(wim, datacenter,
+                     pop_dpid='AA:AA:AA:AA:AA:AA:AA:AA', pop_port=None,
+                     wan_dpid='BB:BB:BB:BB:BB:BB:BB:BB', wan_port=None):
+    """Produce a ``wim_port_mappings`` record for a (wim, datacenter) pair.
+
+    When not given explicitly, both ports default to
+    ``wim + datacenter + 1`` so every pairing gets a distinct port, and the
+    WAN endpoint id embeds a hash of the mapping info so equal mappings
+    always yield the same id.
+    """
+    mapping_info = {'mapping_type': 'dpid-port',
+                    'wan_switch_dpid': wan_dpid,
+                    'wan_switch_port': (str(wan_port) if wan_port else
+                                        str(int(datacenter) + int(wim) + 1))}
+    # sort_keys makes the serialized form (and hence the hash) deterministic
+    id_ = 'dpid-port|' + sha1(json.dumps(mapping_info, sort_keys=True))
+
+    return {'wim_id': uuid('wim%d' % wim),
+            'datacenter_id': uuid('dc%d' % datacenter),
+            'pop_switch_dpid': pop_dpid,
+            'pop_switch_port': (str(pop_port) if pop_port else
+                                str(int(datacenter) + int(wim) + 1)),
+            # ^  Datacenter router have one port managed by each WIM
+            'wan_service_endpoint_id': id_,
+            # ^  WIM managed router have one port connected to each DC
+            'wan_service_mapping_info': json.dumps(mapping_info)}
+
+
+def processed_port_mapping(wim, datacenter,
+                           num_pairs=1,
+                           pop_dpid='AA:AA:AA:AA:AA:AA:AA:AA',
+                           wan_dpid='BB:BB:BB:BB:BB:BB:BB:BB'):
+    """Emulate the response of the Persistence class, where the records in the
+    data base are grouped by wim and datacenter
+
+    ``num_pairs`` controls how many switch-port pairs are generated for the
+    (wim, datacenter) pair.
+    """
+    return {
+        'wim_id': uuid('wim%d' % wim),
+        'datacenter_id': uuid('dc%d' % datacenter),
+        'pop_wan_mappings': [
+            {'pop_switch_dpid': pop_dpid,
+             'pop_switch_port': wim + 1 + i,
+             'wan_service_endpoint_id':
+                 sha1('dpid-port|%s|%d' % (wan_dpid, datacenter + 1 + i)),
+             'wan_service_mapping_info': {
+                 'mapping_type': 'dpid-port',
+                 'wan_switch_dpid': wan_dpid,
+                 'wan_switch_port': datacenter + 1 + i}}
+            for i in range(num_pairs)
+        ]
+    }
+
+
+def consistent_set(num_wims=NUM_WIMS, num_tenants=NUM_TENANTS,
+                   num_datacenters=NUM_DATACENTERS,
+                   external_ports_config=False):
+    """Build a full, mutually consistent DB fixture: every tenant is
+    associated to every WIM and every datacenter, and every WIM has a port
+    mapping towards every datacenter.
+    """
+    return [
+        {'nfvo_tenants': [tenant(i) for i in range(num_tenants)]},
+        {'wims': [wim(j) for j in range(num_wims)]},
+        {'wim_accounts': [
+            wim_account(j, i)
+            for i in range(num_tenants)
+            for j in range(num_wims)
+        ]},
+        {'wim_nfvo_tenants': [
+            wim_tenant_association(j, i)
+            for i in range(num_tenants)
+            for j in range(num_wims)
+        ]},
+        {'datacenters': [
+            datacenter(k, external_ports_config)
+            for k in range(num_datacenters)
+        ]},
+        {'datacenter_tenants': [
+            datacenter_account(k, i)
+            for i in range(num_tenants)
+            for k in range(num_datacenters)
+        ]},
+        {'tenants_datacenters': [
+            datacenter_tenant_association(k, i)
+            for i in range(num_tenants)
+            for k in range(num_datacenters)
+        ]},
+        # With external ports config, the mapping ports must agree with the
+        # ones embedded in each datacenter's config JSON
+        {'wim_port_mappings': [
+            (wim_port_mapping(j, k, *_datacenter_to_switch_port(k))
+             if external_ports_config else wim_port_mapping(j, k))
+            for j in range(num_wims)
+            for k in range(num_datacenters)
+        ]},
+    ]
+
+
+def instance_nets(num_datacenters=2, num_links=2, status='BUILD'):
+    """Example of multi-site deploy with N datacenters and M WAN links between
+    them (e.g M = 2 -> back and forth)
+    """
+    return [
+        {'uuid': uuid('net%d%d' % (k, l)),
+         'datacenter_id': uuid('dc%d' % k),
+         'datacenter_tenant_id': uuid('dc-account0%d' % k),
+         'instance_scenario_id': uuid('nsr0'),
+         # ^  instance_scenario_id == NS Record id
+         'sce_net_id': uuid('vld%d' % l),
+         # ^  scenario net id == VLD id
+         'status': status,
+         'vim_net_id': None,
+         'created': True}
+        for k in range(num_datacenters)
+        for l in range(num_links)
+    ]
+
+
+def wim_actions(action='CREATE', status='SCHEDULED',
+                action_id=None, instance=0,
+                wim=0, tenant=0, num_links=1):
+    """Create a list of actions for the WIM,
+
+    Arguments:
+        action: type of action (CREATE) by default
+        status: initial status of every generated task (SCHEDULED default)
+        action_id: shared ``instance_action_id``; autogenerated from the
+            current timestamp when not given
+        instance: NS-record fixture index the actions refer to
+        wim: WIM fixture index to create actions for
+        tenant: tenant fixture index to create actions for
+        num_links: number of WAN links to be established by each WIM
+    """
+
+    action_id = action_id or 'ACTION-{}'.format(time())
+
+    return [
+        {
+            'action': action,
+            'wim_internal_id': uuid('-wim-net%d%d%d' % (wim, instance, link)),
+            'wim_account_id': uuid('wim-account%d%d' % (tenant, wim)),
+            'instance_action_id': action_id,
+            'item': 'instance_wim_nets',
+            'item_id': uuid('wim-net%d%d%d' % (wim, instance, link)),
+            'status': status,
+            'task_index': link,
+            'created_at': time(),
+            'modified_at': time(),
+            'extra': None
+        }
+        for link in range(num_links)
+    ]
+
+
+def instance_action(tenant=0, instance=0, action_id=None,
+                    num_tasks=1, num_done=0, num_failed=0):
+    action_id = action_id or 'ACTION-{}'.format(time())
+
+    return {
+        'uuid': action_id,
+        'tenant_id': uuid('tenant%d' % tenant),
+        'instance_id': uuid('nsr%d' % instance),
+        'number_tasks': num_tasks,
+        'number_done': num_done,
+        'number_failed': num_failed,
+    }
+
+
+def instance_wim_nets(instance=0, wim=0, num_links=1,
+                      status='SCHEDULED_CREATION'):
+    """Example of multi-site deploy with N wims and M WAN links between
+    them (e.g M = 2 -> back and forth)
+    VIM nets
+    """
+    return [
+        {'uuid': uuid('wim-net%d%d%d' % (wim, instance, l)),
+         'wim_id': uuid('wim%d' % wim),
+         'wim_account_id': uuid('wim-account%d' % wim),
+         'wim_internal_id': uuid('-net%d%d' % (wim, l)),
+         'instance_scenario_id': uuid('nsr%d' % instance),
+         # ^  instance_scenario_id == NS Record id
+         'sce_net_id': uuid('vld%d' % l),
+         # ^  scenario net id == VLD id
+         'status': status,
+         'created': False}
+        for l in range(num_links)
+    ]
+
+
+def instance_vm(instance=0, vim_info=None):
+    vim_info = {'OS-EXT-SRV-ATTR:hypervisor_hostname': 'host%d' % instance}
+    return {
+        'uuid': uuid('vm%d' % instance),
+        'instance_vnf_id': uuid('vnf%d' % instance),
+        'vm_id': uuid('vm%d' % instance),
+        'vim_vm_id': uuid('vm%d' % instance),
+        'status': 'ACTIVE',
+        'vim_info': vim_info,
+    }
+
+
+def instance_interface(instance=0, interface=0, datacenter=0, link=0):
+    return {
+        'uuid': uuid('interface%d%d' % (instance, interface)),
+        'instance_vm_id': uuid('vm%d' % instance),
+        'instance_net_id': uuid('net%d%d' % (datacenter, link)),
+        'interface_id': uuid('iface%d' % interface),
+        'type': 'external',
+        'vlan': 3
+    }
diff --git a/RO/osm_ro/wim/tests/test_actions.py b/RO/osm_ro/wim/tests/test_actions.py
new file mode 100644 (file)
index 0000000..3756869
--- /dev/null
@@ -0,0 +1,454 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+# pylint: disable=E1101
+
+#from __future__ import unicode_literals, print_function
+
+import json
+import unittest
+from time import time
+
+from unittest.mock import MagicMock, patch
+
+from . import fixtures as eg
+from ...tests.db_helpers import (
+    TestCaseWithDatabasePerTest,
+    disable_foreign_keys,
+    uuid,
+)
+from ..persistence import WimPersistence, preprocess_record
+from ..wan_link_actions import WanLinkCreate, WanLinkDelete
+from ..wimconn import WimConnectorError
+
+
+class TestActionsWithDb(TestCaseWithDatabasePerTest):
+    """Base class: each test gets a fresh database plus mocked
+    WIM connector and openvim objects, so only the action logic and its
+    DB bookkeeping are exercised.
+    """
+
+    def setUp(self):
+        super(TestActionsWithDb, self).setUp()
+        self.persist = WimPersistence(self.db)
+        # connector and ovim are MagicMocks: tests patch the methods they need
+        self.connector = MagicMock()
+        self.ovim = MagicMock()
+
+
+class TestCreate(TestActionsWithDb):
+    """Tests for processing ``WanLinkCreate`` actions against a real DB.
+
+    The ``prepare_*`` helpers build the DB state and the action under test;
+    each test then patches the mocked connector/ovim and asserts on the
+    resulting action status and instance_actions counters.
+    """
+
+    @disable_foreign_keys
+    def test_process__instance_nets_on_build(self):
+        # Given we want 1 WAN link between 2 datacenters
+        # and the local network in each datacenter is still being built
+        wan_link = eg.instance_wim_nets()
+        instance_nets = eg.instance_nets(num_datacenters=2, num_links=1)
+        for net in instance_nets:
+            net['status'] = 'BUILD'
+        self.populate([{'instance_nets': instance_nets,
+                        'instance_wim_nets': wan_link}])
+
+        # When we try to process a CREATE action that refers to the same
+        # instance_scenario_id and sce_net_id
+        now = time()
+        action = WanLinkCreate(eg.wim_actions('CREATE')[0])
+        action.instance_scenario_id = instance_nets[0]['instance_scenario_id']
+        action.sce_net_id = instance_nets[0]['sce_net_id']
+        # -- ensure it is in the database for updates --> #
+        action_record = action.as_record()
+        action_record['extra'] = json.dumps(action_record['extra'])
+        self.populate([{'vim_wim_actions': action_record}])
+        # <-- #
+        action.process(self.connector, self.persist, self.ovim)
+
+        # Then the action should be deferred (rescheduled for a new attempt)
+        assert action.is_scheduled
+        self.assertEqual(action.extra['attempts'], 1)
+        self.assertGreater(action.extra['last_attempted_at'], now)
+
+    @disable_foreign_keys
+    def test_process__instance_nets_on_error(self):
+        # Given we want 1 WAN link between 2 datacenters
+        # and at least one local network is in a not good state (error, or
+        # being deleted)
+        instance_nets = eg.instance_nets(num_datacenters=2, num_links=1)
+        instance_nets[1]['status'] = 'SCHEDULED_DELETION'
+        wan_link = eg.instance_wim_nets()
+        self.populate([{'instance_nets': instance_nets,
+                        'instance_wim_nets': wan_link}])
+
+        # When we try to process a CREATE action that refers to the same
+        # instance_scenario_id and sce_net_id
+        action = WanLinkCreate(eg.wim_actions('CREATE')[0])
+        action.instance_scenario_id = instance_nets[0]['instance_scenario_id']
+        action.sce_net_id = instance_nets[0]['sce_net_id']
+        # -- ensure it is in the database for updates --> #
+        action_record = action.as_record()
+        action_record['extra'] = json.dumps(action_record['extra'])
+        self.populate([{'vim_wim_actions': action_record}])
+        # <-- #
+        action.process(self.connector, self.persist, self.ovim)
+
+        # Then the action should fail
+        assert action.is_failed
+        self.assertIn('issue with the local networks', action.error_msg)
+        self.assertIn('SCHEDULED_DELETION', action.error_msg)
+
+    def prepare_create__rules(self):
+        # DB state with external ports configured in the datacenters, so the
+        # WAN link is established from the encapsulation rules in vim_info
+        db_state = eg.consistent_set(num_wims=1, num_tenants=1,
+                                     num_datacenters=2,
+                                     external_ports_config=True)
+
+        instance_nets = eg.instance_nets(num_datacenters=2, num_links=1,
+                                         status='ACTIVE')
+        for i, net in enumerate(instance_nets):
+            net['vim_info'] = {}
+            net['vim_info']['provider:physical_network'] = 'provider'
+            net['vim_info']['encapsulation_type'] = 'vlan'
+            net['vim_info']['encapsulation_id'] = i
+            net['sdn_net_id'] = uuid('sdn-net%d' % i)
+
+        instance_action = eg.instance_action(action_id='ACTION-000')
+
+        db_state += [
+            {'instance_wim_nets': eg.instance_wim_nets()},
+            {'instance_nets': [preprocess_record(r) for r in instance_nets]},
+            {'instance_actions': instance_action}]
+
+        action = WanLinkCreate(
+            eg.wim_actions('CREATE', action_id='ACTION-000')[0])
+        # --> ensure it is in the database for updates --> #
+        action_record = action.as_record()
+        action_record['extra'] = json.dumps(action_record['extra'])
+        db_state += [{'vim_wim_actions': action_record}]
+
+        return db_state, action
+
+    @disable_foreign_keys
+    def test_process__rules(self):
+        # Given we want 1 WAN link between 2 datacenters
+        # and the local network in each datacenter is already created
+        db_state, action = self.prepare_create__rules()
+        self.populate(db_state)
+
+        instance_action = self.persist.get_by_uuid(
+            'instance_actions', action.instance_action_id)
+        number_done = instance_action['number_done']
+        number_failed = instance_action['number_failed']
+
+        # If the connector works fine
+        with patch.object(self.connector, 'create_connectivity_service',
+                          lambda *_, **__: (uuid('random-id'), None)):
+            # When we try to process a CREATE action that refers to the same
+            # instance_scenario_id and sce_net_id
+            action.process(self.connector, self.persist, self.ovim)
+
+        # Then the action should be succeeded
+        db_action = self.persist.query_one('vim_wim_actions', WHERE={
+            'instance_action_id': action.instance_action_id,
+            'task_index': action.task_index})
+        self.assertEqual(db_action['status'], 'DONE')
+
+        instance_action = self.persist.get_by_uuid(
+            'instance_actions', action.instance_action_id)
+        self.assertEqual(instance_action['number_done'], number_done + 1)
+        self.assertEqual(instance_action['number_failed'], number_failed)
+
+    @disable_foreign_keys
+    def test_process__rules_fail(self):
+        # Given we want 1 WAN link between 2 datacenters
+        # and the local network in each datacenter is already created
+        db_state, action = self.prepare_create__rules()
+        self.populate(db_state)
+
+        instance_action = self.persist.get_by_uuid(
+            'instance_actions', action.instance_action_id)
+        number_done = instance_action['number_done']
+        number_failed = instance_action['number_failed']
+
+        # If the connector raises an error
+        with patch.object(self.connector, 'create_connectivity_service',
+                          MagicMock(side_effect=WimConnectorError('foobar'))):
+            # When we try to process a CREATE action that refers to the same
+            # instance_scenario_id and sce_net_id
+            action.process(self.connector, self.persist, self.ovim)
+
+        # Then the action should fail
+        db_action = self.persist.query_one('vim_wim_actions', WHERE={
+            'instance_action_id': action.instance_action_id,
+            'task_index': action.task_index})
+        self.assertEqual(db_action['status'], 'FAILED')
+
+        instance_action = self.persist.get_by_uuid(
+            'instance_actions', action.instance_action_id)
+        self.assertEqual(instance_action['number_done'], number_done)
+        self.assertEqual(instance_action['number_failed'], number_failed + 1)
+
+    def prepare_create__sdn(self):
+        # DB state without external ports: the switch/port pairs must then
+        # be discovered through ovim's get_ports (mocked below)
+        db_state = eg.consistent_set(num_wims=1, num_tenants=1,
+                                     num_datacenters=2,
+                                     external_ports_config=False)
+
+        # Make sure all port_mappings are predictable
+        switch = 'AA:AA:AA:AA:AA:AA:AA:AA'
+        port = 1
+        port_mappings = next(r['wim_port_mappings']
+                             for r in db_state if 'wim_port_mappings' in r)
+        for mapping in port_mappings:
+            mapping['pop_switch_dpid'] = switch
+            mapping['pop_switch_port'] = port
+
+        instance_action = eg.instance_action(action_id='ACTION-000')
+        instance_nets = eg.instance_nets(num_datacenters=2, num_links=1,
+                                         status='ACTIVE')
+        for i, net in enumerate(instance_nets):
+            net['sdn_net_id'] = uuid('sdn-net%d' % i)
+
+        db_state += [{'instance_nets': instance_nets},
+                     {'instance_wim_nets': eg.instance_wim_nets()},
+                     {'instance_actions': instance_action}]
+
+        action = WanLinkCreate(
+            eg.wim_actions('CREATE', action_id='ACTION-000')[0])
+        # --> ensure it is in the database for updates --> #
+        action_record = action.as_record()
+        action_record['extra'] = json.dumps(action_record['extra'])
+        db_state += [{'vim_wim_actions': action_record}]
+
+        ovim_patch = patch.object(
+            self.ovim, 'get_ports', MagicMock(return_value=[{
+                'switch_dpid': switch,
+                'switch_port': port,
+            }]))
+
+        return db_state, action, ovim_patch
+
+    @disable_foreign_keys
+    def test_process__sdn(self):
+        # Given we want 1 WAN link between 2 datacenters
+        # and the local network in each datacenter is already created
+        db_state, action, ovim_patch = self.prepare_create__sdn()
+        self.populate(db_state)
+
+        instance_action = self.persist.get_by_uuid(
+            'instance_actions', action.instance_action_id)
+        number_done = instance_action['number_done']
+        number_failed = instance_action['number_failed']
+
+        connector_patch = patch.object(
+            self.connector, 'create_connectivity_service',
+            lambda *_, **__: (uuid('random-id'), None))
+
+        # If the connector works fine
+        with connector_patch, ovim_patch:
+            # When we try to process a CREATE action that refers to the same
+            # instance_scenario_id and sce_net_id
+            action.process(self.connector, self.persist, self.ovim)
+
+        # Then the action should be succeeded
+        db_action = self.persist.query_one('vim_wim_actions', WHERE={
+            'instance_action_id': action.instance_action_id,
+            'task_index': action.task_index})
+        self.assertEqual(db_action['status'], 'DONE')
+
+        instance_action = self.persist.get_by_uuid(
+            'instance_actions', action.instance_action_id)
+        self.assertEqual(instance_action['number_done'], number_done + 1)
+        self.assertEqual(instance_action['number_failed'], number_failed)
+
+    @disable_foreign_keys
+    def test_process__sdn_fail(self):
+        # Given we want 1 WAN link between 2 datacenters
+        # and the local network in each datacenter is already created
+        db_state, action, ovim_patch = self.prepare_create__sdn()
+        self.populate(db_state)
+
+        instance_action = self.persist.get_by_uuid(
+            'instance_actions', action.instance_action_id)
+        number_done = instance_action['number_done']
+        number_failed = instance_action['number_failed']
+
+        connector_patch = patch.object(
+            self.connector, 'create_connectivity_service',
+            MagicMock(side_effect=WimConnectorError('foobar')))
+
+        # If the connector throws an error
+        with connector_patch, ovim_patch:
+            # When we try to process a CREATE action that refers to the same
+            # instance_scenario_id and sce_net_id
+            action.process(self.connector, self.persist, self.ovim)
+
+        # Then the action should fail
+        db_action = self.persist.query_one('vim_wim_actions', WHERE={
+            'instance_action_id': action.instance_action_id,
+            'task_index': action.task_index})
+        self.assertEqual(db_action['status'], 'FAILED')
+
+        instance_action = self.persist.get_by_uuid(
+            'instance_actions', action.instance_action_id)
+        self.assertEqual(instance_action['number_done'], number_done)
+        self.assertEqual(instance_action['number_failed'], number_failed + 1)
+
+
+class TestDelete(TestActionsWithDb):
+    @disable_foreign_keys
+    def test_process__no_internal_id(self):
+        # Given no WAN link was created yet,
+        # when we try to process a DELETE action, with no wim_internal_id
+        action = WanLinkDelete(eg.wim_actions('DELETE')[0])
+        action.wim_internal_id = None
+        # -- ensure it is in the database for updates --> #
+        action_record = action.as_record()
+        action_record['extra'] = json.dumps(action_record['extra'])
+        self.populate([{'vim_wim_actions': action_record,
+                        'instance_wim_nets': eg.instance_wim_nets()}])
+        # <-- #
+        action.process(self.connector, self.persist, self.ovim)
+
+        # Then the action should succeed
+        assert action.is_done
+
+    def prepare_delete(self):
+        db_state = eg.consistent_set(num_wims=1, num_tenants=1,
+                                     num_datacenters=2,
+                                     external_ports_config=True)
+
+        instance_nets = eg.instance_nets(num_datacenters=2, num_links=1,
+                                         status='ACTIVE')
+        for i, net in enumerate(instance_nets):
+            net['vim_info'] = {}
+            net['vim_info']['provider:physical_network'] = 'provider'
+            net['vim_info']['encapsulation_type'] = 'vlan'
+            net['vim_info']['encapsulation_id'] = i
+            net['sdn_net_id'] = uuid('sdn-net%d' % i)
+
+        instance_action = eg.instance_action(action_id='ACTION-000')
+
+        db_state += [
+            {'instance_wim_nets': eg.instance_wim_nets()},
+            {'instance_nets': [preprocess_record(r) for r in instance_nets]},
+            {'instance_actions': instance_action}]
+
+        action = WanLinkDelete(
+            eg.wim_actions('DELETE', action_id='ACTION-000')[0])
+        # --> ensure it is in the database for updates --> #
+        action_record = action.as_record()
+        action_record['extra'] = json.dumps(action_record['extra'])
+        db_state += [{'vim_wim_actions': action_record}]
+
+        return db_state, action
+
+    @disable_foreign_keys
+    def test_process(self):
+        # Given we want to delete 1 WAN link between 2 datacenters
+        db_state, action = self.prepare_delete()
+        self.populate(db_state)
+
+        instance_action = self.persist.get_by_uuid(
+            'instance_actions', action.instance_action_id)
+        number_done = instance_action['number_done']
+        number_failed = instance_action['number_failed']
+
+        connector_patch = patch.object(
+            self.connector, 'delete_connectivity_service')
+
+        # If the connector works fine
+        with connector_patch:
+            # When we try to process a DELETE action that refers to the same
+            # instance_scenario_id and sce_net_id
+            action.process(self.connector, self.persist, self.ovim)
+
+        # Then the action should be succeeded
+        db_action = self.persist.query_one('vim_wim_actions', WHERE={
+            'instance_action_id': action.instance_action_id,
+            'task_index': action.task_index})
+        self.assertEqual(db_action['status'], 'DONE')
+
+        instance_action = self.persist.get_by_uuid(
+            'instance_actions', action.instance_action_id)
+        self.assertEqual(instance_action['number_done'], number_done + 1)
+        self.assertEqual(instance_action['number_failed'], number_failed)
+
+    @disable_foreign_keys
+    def test_process__wan_link_error(self):
+        # Given we have a delete action that targets a wan link with an error
+        db_state, action = self.prepare_delete()
+        wan_link = [tables for tables in db_state
+                    if tables.get('instance_wim_nets')][0]['instance_wim_nets']
+        from pprint import pprint
+        pprint(wan_link)
+        wan_link[0]['status'] = 'ERROR'
+        self.populate(db_state)
+
+        # When we try to process it
+        action.process(self.connector, self.persist, self.ovim)
+
+        # Then it should fail
+        assert action.is_failed
+
+    def create_action(self):
+        action = WanLinkCreate(
+            eg.wim_actions('CREATE', action_id='ACTION-000')[0])
+        # --> ensure it is in the database for updates --> #
+        action_record = action.as_record()
+        action_record['extra'] = json.dumps(action_record['extra'])
+        self.populate([{'vim_wim_actions': action_record}])
+
+        return action
+
+    @disable_foreign_keys
+    def test_create_and_delete(self):
+        # Given a CREATE action was well succeeded
+        db_state, delete_action = self.prepare_delete()
+        self.populate(db_state)
+
+        delete_action.save(self.persist, task_index=1)
+        create_action = self.create_action()
+
+        connector_patch = patch.multiple(
+            self.connector,
+            delete_connectivity_service=MagicMock(),
+            create_connectivity_service=(
+                lambda *_, **__: (uuid('random-id'), None)))
+
+        with connector_patch:  # , ovim_patch:
+            create_action.process(self.connector, self.persist, self.ovim)
+
+        # When we try to process a CREATE action that refers to the same
+        # instance_scenario_id and sce_net_id
+        with connector_patch:
+            delete_action.process(self.connector, self.persist, self.ovim)
+
+        # Then the DELETE action should be successful
+        db_action = self.persist.query_one('vim_wim_actions', WHERE={
+            'instance_action_id': delete_action.instance_action_id,
+            'task_index': delete_action.task_index})
+        self.assertEqual(db_action['status'], 'DONE')
+
+
+# Allow running this test module directly.
+if __name__ == '__main__':
+    unittest.main()
diff --git a/RO/osm_ro/wim/tests/test_engine.py b/RO/osm_ro/wim/tests/test_engine.py
new file mode 100644 (file)
index 0000000..d518123
--- /dev/null
@@ -0,0 +1,177 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+
+import unittest
+
+from unittest.mock import MagicMock
+
+from . import fixtures as eg
+from ...tests.db_helpers import TestCaseWithDatabasePerTest, uuid
+from ..errors import NoWimConnectedToDatacenters
+from ..engine import WimEngine
+from ..persistence import WimPersistence
+
+
+class TestWimEngineDbMethods(TestCaseWithDatabasePerTest):
+    def setUp(self):
+        super(TestWimEngineDbMethods, self).setUp()
+        self.persist = WimPersistence(self.db)
+        self.engine = WimEngine(persistence=self.persist)
+        self.addCleanup(self.engine.stop_threads)
+
+    def populate(self, seeds=None):
+        super(TestWimEngineDbMethods, self).populate(
+            seeds or eg.consistent_set())
+
+    def test_find_common_wims(self):
+        # Given we have 2 WIM, 3 datacenters, but just 1 of the WIMs have
+        # access to them
+        self.populate([{'nfvo_tenants': [eg.tenant(0)]}] +
+                      eg.wim_set(0, 0) +
+                      eg.wim_set(1, 0) +
+                      eg.datacenter_set(0, 0) +
+                      eg.datacenter_set(1, 0) +
+                      eg.datacenter_set(2, 0) +
+                      [{'wim_port_mappings': [
+                          eg.wim_port_mapping(0, 0),
+                          eg.wim_port_mapping(0, 1),
+                          eg.wim_port_mapping(0, 2)]}])
+
+        # When we retrieve the wims interconnecting some datacenters
+        wim_ids = self.engine.find_common_wims(
+            [uuid('dc0'), uuid('dc1'), uuid('dc2')], tenant='tenant0')
+
+        # Then we should have just the first wim
+        self.assertEqual(len(wim_ids), 1)
+        self.assertEqual(wim_ids[0], uuid('wim0'))
+
+    def test_find_common_wims_multiple(self):
+        # Given we have 2 WIM, 3 datacenters, and all the WIMs have access to
+        # all datacenters
+        self.populate([{'nfvo_tenants': [eg.tenant(0)]}] +
+                      eg.wim_set(0, 0) +
+                      eg.wim_set(1, 0) +
+                      eg.datacenter_set(0, 0) +
+                      eg.datacenter_set(1, 0) +
+                      eg.datacenter_set(2, 0) +
+                      [{'wim_port_mappings': [
+                          eg.wim_port_mapping(0, 0),
+                          eg.wim_port_mapping(0, 1),
+                          eg.wim_port_mapping(0, 2),
+                          eg.wim_port_mapping(1, 0),
+                          eg.wim_port_mapping(1, 1),
+                          eg.wim_port_mapping(1, 2)]}])
+
+        # When we retrieve the wims interconnecting tree datacenters
+        wim_ids = self.engine.find_common_wims(
+            [uuid('dc0'), uuid('dc1'), uuid('dc2')], tenant='tenant0')
+
+        # Then we should have all the wims
+        self.assertEqual(len(wim_ids), 2)
+        self.assertItemsEqual(wim_ids, [uuid('wim0'), uuid('wim1')])
+
+    def test_find_common_wim(self):
+        # Given we have 1 WIM, 3 datacenters but the WIM have access to just 2
+        # of them
+        self.populate([{'nfvo_tenants': [eg.tenant(0)]}] +
+                      eg.wim_set(0, 0) +
+                      eg.datacenter_set(0, 0) +
+                      eg.datacenter_set(1, 0) +
+                      eg.datacenter_set(2, 0) +
+                      [{'wim_port_mappings': [
+                          eg.wim_port_mapping(0, 0),
+                          eg.wim_port_mapping(0, 1)]}])
+
+        # When we retrieve the common wim for the 2 datacenter that are
+        # interconnected
+        wim_id = self.engine.find_common_wim(
+            [uuid('dc0'), uuid('dc1')], tenant='tenant0')
+
+        # Then we should find the wim
+        self.assertEqual(wim_id, uuid('wim0'))
+
+        # When we try to retrieve the common wim for the all the datacenters
+        # Then a NoWimConnectedToDatacenters exception should be raised
+        with self.assertRaises(NoWimConnectedToDatacenters):
+            self.engine.find_common_wim(
+                [uuid('dc0'), uuid('dc1'), uuid('dc2')], tenant='tenant0')
+
+    def test_find_common_wim__different_tenants(self):
+        # Given we have 1 WIM and 2 datacenters connected but the WIMs don't
+        # belong to the tenant we have access to...
+        self.populate([{'nfvo_tenants': [eg.tenant(0), eg.tenant(1)]}] +
+                      eg.wim_set(0, 0) +
+                      eg.datacenter_set(0, 0) +
+                      eg.datacenter_set(1, 0) +
+                      [{'wim_port_mappings': [
+                          eg.wim_port_mapping(0, 0),
+                          eg.wim_port_mapping(0, 1)]}])
+
+        # When we retrieve the common wim for the 2 datacenter that are
+        # interconnected, but using another tenant,
+        # Then we should get an exception
+        with self.assertRaises(NoWimConnectedToDatacenters):
+            self.engine.find_common_wim(
+                [uuid('dc0'), uuid('dc1')], tenant='tenant1')
+
+
+class TestWimEngine(unittest.TestCase):
+    def test_derive_wan_link(self):
+        # Given we have 2 datacenters connected by the same WIM, with port
+        # mappings registered
+        mappings = [eg.processed_port_mapping(0, 0),
+                    eg.processed_port_mapping(0, 1)]
+        persist = MagicMock(
+            get_wim_port_mappings=MagicMock(return_value=mappings))
+
+        engine = WimEngine(persistence=persist)
+        self.addCleanup(engine.stop_threads)
+
+        # When we receive a list of 4 instance nets, representing
+        # 2 VLDs connecting 2 datacenters each
+        instance_nets = eg.instance_nets(2, 2)
+        wan_links = engine.derive_wan_links({}, instance_nets, uuid('tenant0'))
+
+        # Then we should derive 2 wan_links with the same instance_scenario_id
+        # and different scenario_network_id
+        self.assertEqual(len(wan_links), 2)
+        for link in wan_links:
+            self.assertEqual(link['instance_scenario_id'], uuid('nsr0'))
+        # Each VLD needs a network to be created in each datacenter
+        self.assertItemsEqual([l['sce_net_id'] for l in wan_links],
+                              [uuid('vld0'), uuid('vld1')])
+
+
+# Allow running this test module directly.
+if __name__ == '__main__':
+    unittest.main()
diff --git a/RO/osm_ro/wim/tests/test_http_handler.py b/RO/osm_ro/wim/tests/test_http_handler.py
new file mode 100644 (file)
index 0000000..e42e53c
--- /dev/null
@@ -0,0 +1,573 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+
+import unittest
+
+import bottle
+from unittest.mock import MagicMock, patch
+from webtest import TestApp
+
+from . import fixtures as eg  # "examples"
+from ...http_tools.errors import Conflict, Not_Found
+from ...tests.db_helpers import TestCaseWithDatabasePerTest, uuid
+from ...utils import merge_dicts
+from ..http_handler import WimHandler
+
+OK = 200
+
+
+@patch('osm_ro.wim.wim_thread.CONNECTORS', MagicMock())  # Avoid external calls
+@patch('osm_ro.wim.wim_thread.WimThread.start', MagicMock())  # Avoid running
+class TestHttpHandler(TestCaseWithDatabasePerTest):
+    def setUp(self):
+        """Create a WimHandler backed by the test DB and a WSGI test app."""
+        super(TestHttpHandler, self).setUp()
+        bottle.debug(True)
+        handler = WimHandler(db=self.db)
+        self.engine = handler.engine
+        # Make sure background threads are stopped even if a test fails.
+        self.addCleanup(self.engine.stop_threads)
+        self.app = TestApp(handler.wsgi_app)
+
+    def populate(self, seeds=None):
+        """Seed the database, defaulting to the standard consistent set."""
+        super(TestHttpHandler, self).populate(seeds or eg.consistent_set())
+
+    def test_list_wims(self):
+        # Given some wims are registered in the database
+        self.populate()
+        # when a GET /<tenant_id>/wims request arrives
+        tenant_id = uuid('tenant0')
+        response = self.app.get('/{}/wims'.format(tenant_id))
+
+        # then the request should be well succeeded
+        self.assertEqual(response.status_code, OK)
+        # and all the registered wims should be present
+        retrieved_wims = {v['name']: v for v in response.json['wims']}
+        for name in retrieved_wims:
+            identifier = int(name.replace('wim', ''))
+            self.assertDictContainsSubset(
+                eg.wim(identifier), retrieved_wims[name])
+
+    def test_show_wim(self):
+        # Given some wims are registered in the database
+        self.populate()
+        # when a GET /<tenant_id>/wims/<wim_id> request arrives
+        tenant_id = uuid('tenant0')
+        wim_id = uuid('wim1')
+        response = self.app.get('/{}/wims/{}'.format(tenant_id, wim_id))
+
+        # then the request should be well succeeded
+        self.assertEqual(response.status_code, OK)
+        # and the registered wim (wim1) should be present
+        self.assertDictContainsSubset(eg.wim(1), response.json['wim'])
+        # Moreover, it also works with tenant_id =  all
+        response = self.app.get('/any/wims/{}'.format(wim_id))
+        self.assertEqual(response.status_code, OK)
+        self.assertDictContainsSubset(eg.wim(1), response.json['wim'])
+
+    def test_show_wim__wim_doesnt_exists(self):
+        """GET with an unknown wim_id should return Not_Found."""
+        # Given wim_id does not refer to any already registered wim
+        self.populate()
+        # when a GET /<tenant_id>/wims/<wim_id> request arrives
+        tenant_id = uuid('tenant0')
+        wim_id = uuid('wim999')
+        response = self.app.get(
+            '/{}/wims/{}'.format(tenant_id, wim_id),
+            expect_errors=True)
+
+        # then the result should not be well succeeded
+        self.assertEqual(response.status_code, Not_Found)
+
+    def test_show_wim__tenant_doesnt_exists(self):
+        """GET with an unknown tenant_id should return Not_Found."""
+        # Given tenant_id does not refer to any already registered tenant
+        self.populate()
+        # when a GET /<tenant_id>/wims/<wim_id> request arrives
+        tenant_id = uuid('tenant999')
+        wim_id = uuid('wim0')
+        response = self.app.get(
+            '/{}/wims/{}'.format(tenant_id, wim_id),
+            expect_errors=True)
+
+        # then the result should not be well succeeded
+        self.assertEqual(response.status_code, Not_Found)
+
+    def test_edit_wim(self):
+        # Given a WIM exists in the database
+        self.populate()
+        # when a PUT /wims/<wim_id> request arrives
+        wim_id = uuid('wim1')
+        response = self.app.put_json('/wims/{}'.format(wim_id), {
+            'wim': {'name': 'My-New-Name'}})
+
+        # then the request should be well succeeded
+        self.assertEqual(response.status_code, OK)
+        # and the registered wim (wim1) should be present
+        self.assertDictContainsSubset(
+            merge_dicts(eg.wim(1), name='My-New-Name'),
+            response.json['wim'])
+
+    def test_edit_wim__port_mappings(self):
+        # Given a WIM exists in the database
+        self.populate()
+        # when a PUT /wims/<wim_id> request arrives
+        wim_id = uuid('wim1')
+        response = self.app.put_json(
+            '/wims/{}'.format(wim_id), {
+                'wim': dict(
+                    name='My-New-Name',
+                    config={'wim_port_mapping': [{
+                        'datacenter_name': 'dc0',
+                        'pop_wan_mappings': [{
+                            'pop_switch_dpid': '00:AA:11:BB:22:CC:33:DD',
+                            'pop_switch_port': 1,
+                            'wan_service_mapping_info': {
+                                'mapping_type': 'dpid-port',
+                                'wan_switch_dpid': 'BB:BB:BB:BB:BB:BB:BB:0A',
+                                'wan_switch_port': 1
+                            }
+                        }]}]
+                    }
+                )
+            }
+        )
+
+        # then the request should be well succeeded
+        self.assertEqual(response.status_code, OK)
+        # and the registered wim (wim1) should be present
+        self.assertDictContainsSubset(
+            merge_dicts(eg.wim(1), name='My-New-Name'),
+            response.json['wim'])
+        # and the port mappings hould be updated
+        mappings = response.json['wim']['config']['wim_port_mapping']
+        self.assertEqual(len(mappings), 1)
+        self.assertEqual(
+            mappings[0]['pop_wan_mappings'][0]['pop_switch_dpid'],
+            '00:AA:11:BB:22:CC:33:DD')
+
+    def test_delete_wim(self):
+        """DELETE /wims/<wim_id> should cascade deletes and stop threads."""
+        # Given a WIM exists in the database
+        self.populate()
+        # Record table sizes before the deletion to assert the cascades.
+        num_accounts = self.count('wim_accounts')
+        num_associations = self.count('wim_nfvo_tenants')
+        num_mappings = self.count('wim_port_mappings')
+
+        with self.engine.threads_running():
+            num_threads = len(self.engine.threads)
+            # when a DELETE /wims/<wim_id> request arrives
+            wim_id = uuid('wim1')
+            response = self.app.delete('/wims/{}'.format(wim_id))
+            num_threads_after = len(self.engine.threads)
+
+        # then the request should be well succeeded
+        self.assertEqual(response.status_code, OK)
+        self.assertIn('deleted', response.json['result'])
+        # and the registered wim1 should be deleted
+        response = self.app.get(
+            '/any/wims/{}'.format(wim_id),
+            expect_errors=True)
+        self.assertEqual(response.status_code, Not_Found)
+        # and all the dependent records in other tables should be deleted:
+        # wim_accounts, wim_nfvo_tenants, wim_port_mappings
+        self.assertEqual(self.count('wim_nfvo_tenants'),
+                         num_associations - eg.NUM_TENANTS)
+        self.assertLess(self.count('wim_port_mappings'), num_mappings)
+        self.assertEqual(self.count('wim_accounts'),
+                         num_accounts - eg.NUM_TENANTS)
+        # And the threads associated with the wim accounts should be stopped
+        self.assertEqual(num_threads_after, num_threads - eg.NUM_TENANTS)
+
+    def test_create_wim(self):
+        # Given no WIM exists yet
+        # when a POST /wims request arrives with the right payload
+        response = self.app.post_json('/wims', {'wim': eg.wim(999)})
+
+        # then the request should be well succeeded
+        self.assertEqual(response.status_code, OK)
+        self.assertEqual(response.json['wim']['name'], 'wim999')
+
+    def test_create_wim__port_mappings(self):
+        """POST /wims should accept and store inline port mappings."""
+        self.populate()
+        # when a POST /wims request arrives with the right payload
+        response = self.app.post_json(
+            '/wims', {
+                'wim': merge_dicts(
+                    eg.wim(999),
+                    config={'wim_port_mapping': [{
+                        'datacenter_name': 'dc0',
+                        'pop_wan_mappings': [{
+                            'pop_switch_dpid': 'AA:AA:AA:AA:AA:AA:AA:01',
+                            'pop_switch_port': 1,
+                            'wan_service_mapping_info': {
+                                'mapping_type': 'dpid-port',
+                                'wan_switch_dpid': 'BB:BB:BB:BB:BB:BB:BB:01',
+                                'wan_switch_port': 1
+                            }
+                        }]}]
+                    }
+                )
+            }
+        )
+
+        # then the request should be well succeeded
+        self.assertEqual(response.status_code, OK)
+        self.assertEqual(response.json['wim']['name'], 'wim999')
+        self.assertEqual(
+            len(response.json['wim']['config']['wim_port_mapping']), 1)
+
+    def test_create_wim_account(self):
+        """POST /<tenant>/wims/<wim> should create an account + thread."""
+        # Given a WIM and a NFVO tenant exist but are not associated
+        self.populate([{'wims': [eg.wim(0)]},
+                       {'nfvo_tenants': [eg.tenant(0)]}])
+
+        with self.engine.threads_running():
+            num_threads = len(self.engine.threads)
+            # when a POST /<tenant_id>/wims/<wim_id> arrives
+            response = self.app.post_json(
+                '/{}/wims/{}'.format(uuid('tenant0'), uuid('wim0')),
+                {'wim_account': eg.wim_account(0, 0)})
+
+            num_threads_after = len(self.engine.threads)
+
+        # then a new thread should be created
+        self.assertEqual(num_threads_after, num_threads + 1)
+
+        # and the request should be well succeeded
+        self.assertEqual(response.status_code, OK)
+        self.assertEqual(response.json['wim_account']['name'], 'wim-account00')
+
+        # and a new association record should be created
+        association = self.db.get_rows(FROM='wim_nfvo_tenants')
+        assert association
+        self.assertEqual(len(association), 1)
+        self.assertEqual(association[0]['wim_id'], uuid('wim0'))
+        self.assertEqual(association[0]['nfvo_tenant_id'], uuid('tenant0'))
+        self.assertEqual(association[0]['wim_account_id'],
+                         response.json['wim_account']['uuid'])
+
+    def test_create_wim_account__existing_account(self):
+        """Posting an existing account name should just associate it."""
+        # Given a WIM, a WIM account and a NFVO tenants exist
+        # But the NFVO and the WIM are not associated
+        self.populate([
+            {'wims': [eg.wim(0)]},
+            {'nfvo_tenants': [eg.tenant(0)]},
+            {'wim_accounts': [eg.wim_account(0, 0)]}])
+
+        # when a POST /<tenant_id>/wims/<wim_id> arrives
+        # and it refers to an existing wim account
+        response = self.app.post_json(
+            '/{}/wims/{}'.format(uuid('tenant0'), uuid('wim0')),
+            {'wim_account': {'name': 'wim-account00'}})
+
+        # then the request should be well succeeded
+        self.assertEqual(response.status_code, OK)
+        # and the association should be created
+        association = self.db.get_rows(
+            FROM='wim_nfvo_tenants',
+            WHERE={'wim_id': uuid('wim0'),
+                   'nfvo_tenant_id': uuid('tenant0')})
+        assert association
+        self.assertEqual(len(association), 1)
+        # but no new wim_account should be created
+        wim_accounts = self.db.get_rows(FROM='wim_accounts')
+        self.assertEqual(len(wim_accounts), 1)
+        self.assertEqual(wim_accounts[0]['name'], 'wim-account00')
+
+    def test_create_wim_account__existing_account__differing(self):
+        """Conflicting fields for an existing account must be rejected."""
+        # Given a WIM, a WIM account and a NFVO tenants exist
+        # But the NFVO and the WIM are not associated
+        self.populate([
+            {'wims': [eg.wim(0)]},
+            {'nfvo_tenants': [eg.tenant(0)]},
+            {'wim_accounts': [eg.wim_account(0, 0)]}])
+
+        # when a POST /<tenant_id>/wims/<wim_id> arrives
+        # and it refers to an existing wim account,
+        # but with different fields
+        response = self.app.post_json(
+            '/{}/wims/{}'.format(uuid('tenant0'), uuid('wim0')), {
+                'wim_account': {
+                    'name': 'wim-account00',
+                    'user': 'john',
+                    'password': 'abc123'}},
+            expect_errors=True)
+
+        # then the request should not be well succeeded
+        self.assertEqual(response.status_code, Conflict)
+        # some useful message should be displayed
+        response.mustcontain('attempt to overwrite', 'user', 'password')
+        # and the association should not be created
+        association = self.db.get_rows(
+            FROM='wim_nfvo_tenants',
+            WHERE={'wim_id': uuid('wim0'),
+                   'nfvo_tenant_id': uuid('tenant0')})
+        assert not association
+
+    def test_create_wim_account__association_already_exists(self):
+        """Associating the same WIM/tenant pair twice must be rejected."""
+        # Given a WIM, a WIM account and a NFVO tenants exist
+        # and are correctly associated
+        self.populate()
+        num_assoc_before = self.count('wim_nfvo_tenants')
+
+        # when a POST /<tenant_id>/wims/<wim_id> arrives trying to connect a
+        # WIM and a tenant for the second time
+        response = self.app.post_json(
+            '/{}/wims/{}'.format(uuid('tenant0'), uuid('wim0')), {
+                'wim_account': {
+                    'user': 'user999',
+                    'password': 'password999'}},
+            expect_errors=True)
+
+        # then the request should not be well succeeded
+        self.assertEqual(response.status_code, Conflict)
+        # the message should be useful
+        response.mustcontain('There is already', uuid('wim0'), uuid('tenant0'))
+
+        num_assoc_after = self.count('wim_nfvo_tenants')
+
+        # and the number of association record should not be increased
+        self.assertEqual(num_assoc_before, num_assoc_after)
+
+    def test_create_wim__tenant_doesnt_exist(self):
+        """An unknown tenant in the URL should yield Not_Found."""
+        # Given the tenant does not exist
+        self.populate()
+
+        # But the user tries to create a wim_account anyway
+        response = self.app.post_json(
+            '/{}/wims/{}'.format(uuid('tenant999'), uuid('wim0')), {
+                'wim_account': {
+                    'user': 'user999',
+                    'password': 'password999'}},
+            expect_errors=True)
+
+        # then the request should not be well succeeded
+        self.assertEqual(response.status_code, Not_Found)
+        # the message should be useful
+        response.mustcontain('No record was found', uuid('tenant999'))
+
+    def test_create_wim__wim_doesnt_exist(self):
+        """An unknown wim in the URL should yield Not_Found."""
+        # Given the wim does not exist
+        self.populate()
+
+        # But the user tries to create a wim_account anyway
+        response = self.app.post_json(
+            '/{}/wims/{}'.format(uuid('tenant0'), uuid('wim999')), {
+                'wim_account': {
+                    'user': 'user999',
+                    'password': 'password999'}},
+            expect_errors=True)
+
+        # then the request should not be well succeeded
+        self.assertEqual(response.status_code, Not_Found)
+        # the message should be useful
+        response.mustcontain('No record was found', uuid('wim999'))
+
+    def test_update_wim_account(self):
+        """Updating an account should reload its thread, not respawn it."""
+        # Given a WIM account connecting a tenant and a WIM exists
+        self.populate()
+
+        with self.engine.threads_running():
+            num_threads = len(self.engine.threads)
+
+            # Wrap the thread's reload method so the call can be asserted.
+            thread = self.engine.threads[uuid('wim-account00')]
+            reload = MagicMock(wraps=thread.reload)
+
+            with patch.object(thread, 'reload', reload):
+                # when a PUT /<tenant_id>/wims/<wim_id> arrives
+                response = self.app.put_json(
+                    '/{}/wims/{}'.format(uuid('tenant0'), uuid('wim0')), {
+                        'wim_account': {
+                            'name': 'account888',
+                            'user': 'user888'}})
+
+            num_threads_after = len(self.engine.threads)
+
+        # then the wim thread should be restarted
+        reload.assert_called_once()
+        # and no thread should be added or removed
+        self.assertEqual(num_threads_after, num_threads)
+
+        # and the request should be well succeeded
+        self.assertEqual(response.status_code, OK)
+        self.assertEqual(response.json['wim_account']['name'], 'account888')
+        self.assertEqual(response.json['wim_account']['user'], 'user888')
+
+    def test_update_wim_account__multiple(self):
+        """PUT /any/wims/<wim> should update the accounts of all tenants."""
+        # Given a WIM account connected to several tenants
+        self.populate()
+
+        with self.engine.threads_running():
+            # when a PUT /any/wims/<wim_id> arrives
+            response = self.app.put_json(
+                '/any/wims/{}'.format(uuid('wim0')), {
+                    'wim_account': {
+                        'user': 'user888',
+                        'config': {'x': 888}}})
+
+        # then the request should be well succeeded
+        self.assertEqual(response.status_code, OK)
+        self.assertEqual(len(response.json['wim_accounts']), eg.NUM_TENANTS)
+
+        for account in response.json['wim_accounts']:
+            self.assertEqual(account['user'], 'user888')
+            self.assertEqual(account['config']['x'], 888)
+
+    def test_delete_wim_account(self):
+        # Given a WIM account exists and it is connected to a tenant
+        self.populate()
+
+        num_accounts_before = self.count('wim_accounts')
+
+        with self.engine.threads_running():
+            thread = self.engine.threads[uuid('wim-account00')]
+            exit = MagicMock(wraps=thread.exit)
+            num_threads = len(self.engine.threads)
+
+            with patch.object(thread, 'exit', exit):
+                # when a DELETE /<tenant_id>/wims/<wim_id> arrives
+                response = self.app.delete_json(
+                    '/{}/wims/{}'.format(uuid('tenant0'), uuid('wim0')))
+
+            num_threads_after = len(self.engine.threads)
+
+        # then the wim thread should exit
+        self.assertEqual(num_threads_after, num_threads - 1)
+        exit.assert_called_once()
+
+        # and the request should be well succeeded
+        self.assertEqual(response.status_code, OK)
+        response.mustcontain('account `wim-account00` deleted')
+
+        # and the number of wim_accounts should decrease
+        num_accounts_after = self.count('wim_accounts')
+        self.assertEqual(num_accounts_after, num_accounts_before - 1)
+
+    def test_delete_wim_account__multiple(self):
+        # Given a WIM account exists and it is connected to several tenants
+        self.populate()
+
+        num_accounts_before = self.count('wim_accounts')
+
+        with self.engine.threads_running():
+            # when a DELETE /any/wims/<wim_id> arrives
+            response = self.app.delete_json(
+                '/any/wims/{}'.format(uuid('wim0')))
+
+        # then the request should be well succeeded
+        self.assertEqual(response.status_code, OK)
+        response.mustcontain('account `wim-account00` deleted')
+        response.mustcontain('account `wim-account10` deleted')
+
+        # and the number of wim_accounts should decrease
+        num_accounts_after = self.count('wim_accounts')
+        self.assertEqual(num_accounts_after,
+                         num_accounts_before - eg.NUM_TENANTS)
+
+    def test_delete_wim_account__doesnt_exist(self):
+        # Given we have a tenant that is not connected to a WIM
+        self.populate()
+        tenant = {'uuid': uuid('tenant888'), 'name': 'tenant888'}
+        self.populate([{'nfvo_tenants': [tenant]}])
+
+        num_accounts_before = self.count('wim_accounts')
+
+        # when a DELETE /<tenant_id>/wims/<wim_id> arrives
+        response = self.app.delete(
+            '/{}/wims/{}'.format(uuid('tenant888'), uuid('wim0')),
+            expect_errors=True)
+
+        # then the request should not succeed
+        self.assertEqual(response.status_code, Not_Found)
+
+        # and the number of wim_accounts should not decrease
+        num_accounts_after = self.count('wim_accounts')
+        self.assertEqual(num_accounts_after, num_accounts_before)
+
+    def test_create_port_mappings(self):
+        # Given we have a wim and datacenter without any port mappings
+        self.populate([{'nfvo_tenants': eg.tenant(0)}] +
+                      eg.datacenter_set(888, 0) +
+                      eg.wim_set(999, 0))
+
+        # when a POST /<tenant_id>/wims/<wim_id>/port_mapping arrives
+        response = self.app.post_json(
+            '/{}/wims/{}/port_mapping'.format(uuid('tenant0'), uuid('wim999')),
+            {'wim_port_mapping': [{
+                'datacenter_name': 'dc888',
+                'pop_wan_mappings': [
+                    {'pop_switch_dpid': 'AA:AA:AA:AA:AA:AA:AA:AA',
+                     'pop_switch_port': 1,
+                     'wan_service_mapping_info': {
+                         'mapping_type': 'dpid-port',
+                         'wan_switch_dpid': 'BB:BB:BB:BB:BB:BB:BB:BB',
+                         'wan_switch_port': 1
+                     }}
+                ]}
+            ]})
+
+        # the request should be well succeeded
+        self.assertEqual(response.status_code, OK)
+        # and port mappings should be stored in the database
+        port_mapping = self.db.get_rows(FROM='wim_port_mappings')
+        self.assertEqual(len(port_mapping), 1)
+
+    def test_get_port_mappings(self):
+        # Given WIMS and datacenters exist with port mappings between them
+        self.populate()
+        # when a GET /<tenant_id>/wims/<wim_id>/port_mapping arrives
+        response = self.app.get(
+            '/{}/wims/{}/port_mapping'.format(uuid('tenant0'), uuid('wim0')))
+        # the request should be well succeeded
+        self.assertEqual(response.status_code, OK)
+        # and we should see port mappings for each WIM, datacenter pair
+        mappings = response.json['wim_port_mapping']
+        self.assertEqual(len(mappings), eg.NUM_DATACENTERS)
+        # ^  In the fixture set all the datacenters are connected to all wims
+
+    def test_delete_port_mappings(self):
+        # Given WIMS and datacenters exist with port mappings between them
+        self.populate()
+        num_mappings_before = self.count('wim_port_mappings')
+
+        # when a DELETE /<tenant_id>/wims/<wim_id>/port_mapping arrives
+        response = self.app.delete(
+            '/{}/wims/{}/port_mapping'.format(uuid('tenant0'), uuid('wim0')))
+        # the request should be well succeeded
+        self.assertEqual(response.status_code, OK)
+        # and the number of port mappings should decrease
+        num_mappings_after = self.count('wim_port_mappings')
+        self.assertEqual(num_mappings_after,
+                         num_mappings_before - eg.NUM_DATACENTERS)
+        # ^  In the fixture set all the datacenters are connected to all wims
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/RO/osm_ro/wim/tests/test_persistence.py b/RO/osm_ro/wim/tests/test_persistence.py
new file mode 100644 (file)
index 0000000..ecca4fa
--- /dev/null
@@ -0,0 +1,260 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+
+import unittest
+from itertools import chain
+
+from . import fixtures as eg
+from ...tests.db_helpers import (
+    TestCaseWithDatabasePerTest,
+    disable_foreign_keys,
+    uuid
+)
+from ..persistence import (
+    WimPersistence,
+    hide_confidential_fields,
+    serialize_fields,
+    unserialize_fields
+)
+
+
+class TestPersistenceUtils(unittest.TestCase):
+    def test_hide_confidential_fields(self):
+        example = {
+            'password': '123456',
+            'nested.password': '123456',
+            'nested.secret': None,
+        }
+        result = hide_confidential_fields(example,
+                                          fields=('password', 'secret'))
+        for field in 'password', 'nested.password':
+            assert result[field].startswith('***')
+        self.assertIs(result['nested.secret'], None)
+
+    def test_serialize_fields(self):
+        example = {
+            'config': dict(x=1),
+            'nested.info': [1, 2, 3],
+            'nested.config': None
+        }
+        result = serialize_fields(example, fields=('config', 'info'))
+        for field in 'config', 'nested.info':
+            self.assertIsInstance(result[field], str)
+        self.assertIs(result['nested.config'], None)
+
+    def test_unserialize_fields(self):
+        example = {
+            'config': '{"x": 1}',
+            'nested.info': '[1,2,3]',
+            'nested.config': None,
+            'confidential.info': '{"password": "abcdef"}'
+        }
+        result = unserialize_fields(example, fields=('config', 'info'))
+        self.assertEqual(result['config'], dict(x=1))
+        self.assertEqual(result['nested.info'], [1, 2, 3])
+        self.assertIs(result['nested.config'], None)
+        self.assertNotEqual(result['confidential.info']['password'], 'abcdef')
+        assert result['confidential.info']['password'].startswith('***')
+
+
+class TestWimPersistence(TestCaseWithDatabasePerTest):
+    def setUp(self):
+        super(TestWimPersistence, self).setUp()
+        self.persist = WimPersistence(self.db)
+
+    def populate(self, seeds=None):
+        super(TestWimPersistence, self).populate(seeds or eg.consistent_set())
+
+    def test_query_offset(self):
+        # Given a database contains 4 records
+        self.populate([{'wims': [eg.wim(i) for i in range(4)]}])
+
+        # When we query using a limit of 2 and a offset of 1
+        results = self.persist.query('wims',
+                                     ORDER_BY='name', LIMIT=2, OFFSET=1)
+        # Then we should have 2 results, skipping the first record
+        names = [r['name'] for r in results]
+        self.assertItemsEqual(names, ['wim1', 'wim2'])
+
+    def test_get_wim_account_by_wim_tenant(self):
+        # Given a database contains WIM accounts associated to Tenants
+        self.populate()
+
+        # when we retrieve the account using wim and tenant
+        wim_account = self.persist.get_wim_account_by(
+            uuid('wim0'), uuid('tenant0'))
+
+        # then the right record should be returned
+        self.assertEqual(wim_account['uuid'], uuid('wim-account00'))
+        self.assertEqual(wim_account['name'], 'wim-account00')
+        self.assertEqual(wim_account['user'], 'user00')
+
+    def test_get_wim_account_by_wim_tenant__names(self):
+        # Given a database contains WIM accounts associated to Tenants
+        self.populate()
+
+        # when we retrieve the account using wim and tenant
+        wim_account = self.persist.get_wim_account_by(
+            'wim0', 'tenant0')
+
+        # then the right record should be returned
+        self.assertEqual(wim_account['uuid'], uuid('wim-account00'))
+        self.assertEqual(wim_account['name'], 'wim-account00')
+        self.assertEqual(wim_account['user'], 'user00')
+
+    def test_get_wim_accounts_by_wim(self):
+        # Given a database contains WIM accounts associated to Tenants
+        self.populate()
+
+        # when we retrieve the accounts using wim
+        wim_accounts = self.persist.get_wim_accounts_by(uuid('wim0'))
+
+        # then the right records should be returned
+        self.assertEqual(len(wim_accounts), eg.NUM_TENANTS)
+        for account in wim_accounts:
+            self.assertEqual(account['wim_id'], uuid('wim0'))
+
+    def test_get_wim_port_mappings(self):
+        # Given a database with WIMs, datacenters and port-mappings
+        self.populate()
+
+        # when we retrieve the port mappings for a list of datacenters
+        # using either names or uuids
+        for criteria in ([uuid('dc0'), uuid('dc1')], ['dc0', 'dc1']):
+            mappings = self.persist.get_wim_port_mappings(datacenter=criteria)
+
+            # then each result should have a datacenter_id
+            datacenters = [m['datacenter_id'] for m in mappings]
+            for datacenter in datacenters:
+                self.assertIn(datacenter, [uuid('dc0'), uuid('dc1')])
+
+            # a wim_id
+            wims = [m['wim_id'] for m in mappings]
+            for wim in wims:
+                self.assertIsNot(wim, None)
+
+            # and a array of pairs 'wan' <> 'pop' connections
+            pairs = chain(*(m['pop_wan_mappings'] for m in mappings))
+            self.assertEqual(len(list(pairs)), 2 * eg.NUM_WIMS)
+
+    def test_get_wim_port_mappings_multiple(self):
+        # Given we have more than one connection in a datacenter managed by
+        # the WIM
+        self.populate()
+        self.populate([{
+            'wim_port_mappings': [
+                eg.wim_port_mapping(
+                    0, 0,
+                    pop_dpid='CC:CC:CC:CC:CC:CC:CC:CC',
+                    wan_dpid='DD:DD:DD:DD:DD:DD:DD:DD'),
+                eg.wim_port_mapping(
+                    0, 0,
+                    pop_dpid='EE:EE:EE:EE:EE:EE:EE:EE',
+                    wan_dpid='FF:FF:FF:FF:FF:FF:FF:FF')]}])
+
+        # when we retrieve the port mappings for the wim and datacenter:
+        mappings = (
+            self.persist.get_wim_port_mappings(wim='wim0', datacenter='dc0'))
+
+        # then it should return just a single result, grouped by wim and
+        # datacenter
+        self.assertEqual(len(mappings), 1)
+        self.assertEqual(mappings[0]['wim_id'], uuid('wim0'))
+        self.assertEqual(mappings[0]['datacenter_id'], uuid('dc0'))
+
+        self.assertEqual(len(mappings[0]['pop_wan_mappings']), 3)
+
+        # when we retrieve the mappings for more than one wim/datacenter
+        # the grouping should still work properly
+        mappings = self.persist.get_wim_port_mappings(
+            wim=['wim0', 'wim1'], datacenter=['dc0', 'dc1'])
+        self.assertEqual(len(mappings), 4)
+        pairs = chain(*(m['pop_wan_mappings'] for m in mappings))
+        self.assertEqual(len(list(pairs)), 6)
+
+    def test_get_actions_in_group(self):
+        # Given a good number of wim actions exist in the database
+        kwargs = {'action_id': uuid('action0')}
+        actions = (eg.wim_actions('CREATE', num_links=8, **kwargs) +
+                   eg.wim_actions('FIND', num_links=8, **kwargs) +
+                   eg.wim_actions('START', num_links=8, **kwargs))
+        for i, action in enumerate(actions):
+            action['task_index'] = i
+
+        self.populate([
+            {'nfvo_tenants': eg.tenant()}
+        ] + eg.wim_set() + [
+            {'instance_actions': eg.instance_action(**kwargs)},
+            {'vim_wim_actions': actions}
+        ])
+
+        # When we retrieve them in groups
+        limit = 5
+        results = self.persist.get_actions_in_groups(
+            uuid('wim-account00'), ['instance_wim_nets'], group_limit=limit)
+
+        # Then we should have N groups where N == limit
+        self.assertEqual(len(results), limit)
+        for _, task_list in results:
+            # And since for each link we have created 3 actions (create, find,
+            # start), we should find them in each group
+            self.assertEqual(len(task_list), 3)
+
+    @disable_foreign_keys
+    def test_update_instance_action_counters(self):
+        # Given we have one instance action in the database with 2 incomplete
+        # tasks
+        action = eg.instance_action(num_tasks=2)
+        self.populate([{'instance_actions': action}])
+        # When we update the done counter by 0, nothing should happen
+        self.persist.update_instance_action_counters(action['uuid'], done=0)
+        result = self.persist.get_by_uuid('instance_actions', action['uuid'])
+        self.assertEqual(result['number_done'], 0)
+        self.assertEqual(result['number_failed'], 0)
+        # When we update the done counter by 2, number_done should be 2
+        self.persist.update_instance_action_counters(action['uuid'], done=2)
+        result = self.persist.get_by_uuid('instance_actions', action['uuid'])
+        self.assertEqual(result['number_done'], 2)
+        self.assertEqual(result['number_failed'], 0)
+        # When we update the done counter by -1, and the failed counter by 1
+        self.persist.update_instance_action_counters(
+            action['uuid'], done=-1, failed=1)
+        # Then we should see 1 and 1
+        result = self.persist.get_by_uuid('instance_actions', action['uuid'])
+        self.assertEqual(result['number_done'], 1)
+        self.assertEqual(result['number_failed'], 1)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/RO/osm_ro/wim/tests/test_wim_thread.py b/RO/osm_ro/wim/tests/test_wim_thread.py
new file mode 100644 (file)
index 0000000..b8c8231
--- /dev/null
@@ -0,0 +1,330 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+
+import unittest
+from difflib import unified_diff
+from operator import itemgetter
+from time import time
+
+import json
+
+from unittest.mock import MagicMock, patch
+
+from . import fixtures as eg
+from ...tests.db_helpers import (
+    TestCaseWithDatabasePerTest,
+    disable_foreign_keys,
+    uuid
+)
+from ..engine import WimEngine
+from ..persistence import WimPersistence
+from ..wim_thread import WimThread
+
+
+ignore_connector = patch('osm_ro.wim.wim_thread.CONNECTORS', MagicMock())
+
+
+def _repr(value):
+    return json.dumps(value, indent=4, sort_keys=True)
+
+
+@ignore_connector
+class TestWimThreadWithDb(TestCaseWithDatabasePerTest):
+    def setUp(self):
+        super(TestWimThreadWithDb, self).setUp()
+        self.persist = WimPersistence(self.db)
+        wim = eg.wim(0)
+        account = eg.wim_account(0, 0)
+        account['wim'] = wim
+        self.thread = WimThread(self.persist, account)
+        self.thread.connector = MagicMock()
+
+    def assertTasksEqual(self, left, right):
+        fields = itemgetter('item', 'item_id', 'action', 'status')
+        left_ = (t.as_dict() for t in left)
+        left_ = [fields(t) for t in left_]
+        right_ = [fields(t) for t in right]
+
+        try:
+            self.assertItemsEqual(left_, right_)
+        except AssertionError:
+            print('left', _repr(left))
+            print('left', len(left_), 'items')
+            print('right', len(right_), 'items')
+            result = list(unified_diff(_repr(sorted(left_)).split('\n'),
+                                       _repr(sorted(right_)).split('\n'),
+                                       'left', 'right'))
+            print('diff:\n', '\n'.join(result))
+            raise
+
+    def test_reload_actions__all_create(self):
+        # Given we have 3 CREATE actions stored in the database
+        actions = eg.wim_actions('CREATE',
+                                 action_id=uuid('action0'), num_links=3)
+        self.populate([
+            {'nfvo_tenants': eg.tenant()}
+        ] + eg.wim_set() + [
+            {'instance_actions':
+                eg.instance_action(action_id=uuid('action0'))},
+            {'vim_wim_actions': actions}
+        ])
+
+        # When we reload the tasks
+        self.thread.reload_actions()
+        # All of them should be inserted as pending
+        self.assertTasksEqual(self.thread.pending_tasks, actions)
+
+    def test_reload_actions__all_refresh(self):
+        # Given just DONE tasks are in the database
+        actions = eg.wim_actions(status='DONE',
+                                 action_id=uuid('action0'), num_links=3)
+        self.populate([
+            {'nfvo_tenants': eg.tenant()}
+        ] + eg.wim_set() + [
+            {'instance_actions':
+                eg.instance_action(action_id=uuid('action0'))},
+            {'vim_wim_actions': actions}
+        ])
+
+        # When we reload the tasks
+        self.thread.reload_actions()
+        # All of them should be inserted as refresh
+        self.assertTasksEqual(self.thread.refresh_tasks, actions)
+
+    def test_reload_actions__grouped(self):
+        # Given we have 2 tasks for the same item in the database
+        kwargs = {'action_id': uuid('action0')}
+        actions = (eg.wim_actions('CREATE', **kwargs) +
+                   eg.wim_actions('FIND', **kwargs))
+        for i, action in enumerate(actions):
+            action['task_index'] = i
+
+        self.populate([
+            {'nfvo_tenants': eg.tenant()}
+        ] + eg.wim_set() + [
+            {'instance_actions': eg.instance_action(**kwargs)},
+            {'vim_wim_actions': actions}
+        ])
+
+        # When we reload the tasks
+        self.thread.reload_actions()
+        # Just one group should be created
+        self.assertEqual(len(self.thread.grouped_tasks.values()), 1)
+
+    def test_reload_actions__delete_scheduled(self):
+        # Given we have 3 tasks for the same item in the database, but one of
+        # them is a DELETE task and it is SCHEDULED
+        kwargs = {'action_id': uuid('action0')}
+        actions = (eg.wim_actions('CREATE', **kwargs) +
+                   eg.wim_actions('FIND', **kwargs) +
+                   eg.wim_actions('DELETE', status='SCHEDULED', **kwargs))
+        for i, action in enumerate(actions):
+            action['task_index'] = i
+
+        self.populate([
+            {'nfvo_tenants': eg.tenant()}
+        ] + eg.wim_set() + [
+            {'instance_actions': eg.instance_action(**kwargs)},
+            {'vim_wim_actions': actions}
+        ])
+
+        # When we reload the tasks
+        self.thread.reload_actions()
+        # Just one group should be created
+        self.assertEqual(len(self.thread.grouped_tasks.values()), 1)
+
+    def test_reload_actions__delete_done(self):
+        # Given we have 3 tasks for the same item in the database, but one of
+        # them is a DELETE task and it is not SCHEDULED
+        kwargs = {'action_id': uuid('action0')}
+        actions = (eg.wim_actions('CREATE', **kwargs) +
+                   eg.wim_actions('FIND', **kwargs) +
+                   eg.wim_actions('DELETE', status='DONE', **kwargs))
+        for i, action in enumerate(actions):
+            action['task_index'] = i
+
+        self.populate([
+            {'nfvo_tenants': eg.tenant()}
+        ] + eg.wim_set() + [
+            {'instance_actions': eg.instance_action(**kwargs)},
+            {'vim_wim_actions': actions}
+        ])
+
+        # When we reload the tasks
+        self.thread.reload_actions()
+        # No pending task should be found
+        self.assertEqual(self.thread.pending_tasks, [])
+
+    def test_reload_actions__batch(self):
+        # Given the group_limit is 10, and we have 24
+        group_limit = 10
+        kwargs = {'action_id': uuid('action0')}
+        actions = (eg.wim_actions('CREATE', num_links=8, **kwargs) +
+                   eg.wim_actions('FIND', num_links=8, **kwargs) +
+                   eg.wim_actions('FIND', num_links=8, **kwargs))
+        for i, action in enumerate(actions):
+            action['task_index'] = i
+
+        self.populate([
+            {'nfvo_tenants': eg.tenant()}
+        ] + eg.wim_set() + [
+            {'instance_actions': eg.instance_action(**kwargs)},
+            {'vim_wim_actions': actions}
+        ])
+
+        # When we reload the tasks
+        self.thread.reload_actions(group_limit)
+
+        # Then we should still see the actions in memory properly
+        self.assertTasksEqual(self.thread.pending_tasks, actions)
+        self.assertEqual(len(self.thread.grouped_tasks.values()), 8)
+
+    @disable_foreign_keys
+    def test_process_list__refresh(self):
+        update_wan_link = MagicMock(wrap=self.persist.update_wan_link)
+        update_action = MagicMock(wrap=self.persist.update_wan_link)
+        patches = dict(update_wan_link=update_wan_link,
+                       update_action=update_action)
+
+        with patch.multiple(self.persist, **patches):
+            # Given we have 2 tasks in the refresh queue
+            kwargs = {'action_id': uuid('action0')}
+            actions = (eg.wim_actions('FIND', 'DONE', **kwargs) +
+                       eg.wim_actions('CREATE', 'BUILD', **kwargs))
+            for i, action in enumerate(actions):
+                action['task_index'] = i
+
+            self.populate(
+                [{'instance_wim_nets': eg.instance_wim_nets()}] +
+                [{'instance_actions':
+                    eg.instance_action(num_tasks=2, **kwargs)}] +
+                [{'vim_wim_actions': actions}])
+
+            self.thread.insert_pending_tasks(actions)
+
+            # When we process the refresh list
+            processed = self.thread.process_list('refresh')
+
+            # Then we should have 2 updates
+            self.assertEqual(processed, 2)
+
+            # And the database should be updated accordingly
+            self.assertEqual(update_wan_link.call_count, 2)
+            self.assertEqual(update_action.call_count, 2)
+
+    @disable_foreign_keys
+    def test_delete_superseed_create(self):
+        # Given we insert a scheduled CREATE task
+        instance_action = eg.instance_action(num_tasks=1)
+        self.thread.pending_tasks = []
+        engine = WimEngine(persistence=self.persist)
+        self.addCleanup(engine.stop_threads)
+        wan_links = eg.instance_wim_nets()
+        create_actions = engine.create_actions(wan_links)
+        delete_actions = engine.delete_actions(wan_links)
+        engine.incorporate_actions(create_actions + delete_actions,
+                                   instance_action)
+
+        self.populate(instance_actions=instance_action,
+                      vim_wim_actions=create_actions + delete_actions)
+
+        self.thread.insert_pending_tasks(create_actions)
+
+        assert self.thread.pending_tasks[0].is_scheduled
+
+        # When we insert the equivalent DELETE task
+        self.thread.insert_pending_tasks(delete_actions)
+
+        # Then the CREATE task should be superseded
+        self.assertEqual(self.thread.pending_tasks[0].action, 'CREATE')
+        assert self.thread.pending_tasks[0].is_superseded
+
+        self.thread.process_list('pending')
+        self.thread.process_list('refresh')
+        self.assertFalse(self.thread.pending_tasks)
+
+
+@ignore_connector
+class TestWimThread(unittest.TestCase):
+    def setUp(self):
+        wim = eg.wim(0)
+        account = eg.wim_account(0, 0)
+        account['wim'] = wim
+        self.persist = MagicMock()
+        self.thread = WimThread(self.persist, account)
+        self.thread.connector = MagicMock()
+
+        super(TestWimThread, self).setUp()
+
+    def test_process_refresh(self):
+        # Given we have 30 tasks in the refresh queue
+        kwargs = {'action_id': uuid('action0')}
+        actions = eg.wim_actions('FIND', 'DONE', num_links=30, **kwargs)
+        self.thread.insert_pending_tasks(actions)
+
+        # When we process the refresh list
+        processed = self.thread.process_list('refresh')
+
+        # Then we should have REFRESH_BATCH updates
+        self.assertEqual(processed, self.thread.BATCH)
+
+    def test_process_refresh__with_superseded(self):
+        # Given we have 30 tasks but 15 of them are superseded
+        kwargs = {'action_id': uuid('action0')}
+        actions = eg.wim_actions('FIND', 'DONE', num_links=30, **kwargs)
+        self.thread.insert_pending_tasks(actions)
+        for task in self.thread.refresh_tasks[0:30:2]:
+            task.status = 'SUPERSEDED'
+
+        now = time()
+
+        # When we call the refresh_elements
+        processed = self.thread.process_list('refresh')
+
+        # Then we should have 25 updates (since SUPERSEDED updates are cheap,
+        # they are not counted for the limits)
+        self.assertEqual(processed, 25)
+
+        # The SUPERSEDED tasks should be removed, 5 tasks should be untouched,
+        # and 10 tasks should be rescheduled
+        refresh_tasks = self.thread.refresh_tasks
+        old = [t for t in refresh_tasks if t.process_at <= now]
+        new = [t for t in refresh_tasks if t.process_at > now]
+        self.assertEqual(len(old), 5)
+        self.assertEqual(len(new), 10)
+        self.assertEqual(len(self.thread.refresh_tasks), 15)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/RO/osm_ro/wim/tox.ini b/RO/osm_ro/wim/tox.ini
new file mode 100644 (file)
index 0000000..29f1a8f
--- /dev/null
@@ -0,0 +1,58 @@
+# This tox file allows the devs to run unit tests only for this subpackage.
+# In order to do so, cd into the directory and run `tox`
+
+[tox]
+minversion = 1.8
+envlist = py3,flake8,radon
+skipsdist = True
+
+[testenv]
+passenv = *_DB_*
+setenv =
+    PATH = {env:PATH}:{toxinidir}/../../database_utils
+    DBUTILS = {toxinidir}/../../database_utils
+changedir = {toxinidir}
+commands =
+    nosetests -v -d {posargs:tests}
+deps =
+    WebTest
+    logging
+    bottle
+    coverage
+    jsonschema
+    mock
+    mysqlclient
+    nose
+    six
+    PyYaml
+    paramiko
+    ipdb
+    requests
+
+[testenv:flake8]
+changedir = {toxinidir}
+deps = flake8
+commands = flake8 {posargs:.}
+
+[testenv:radon]
+changedir = {toxinidir}
+deps = radon
+commands =
+    radon cc --show-complexity --total-average {posargs:.}
+    radon mi -s {posargs:.}
+
+[coverage:run]
+branch = True
+source = {toxinidir}
+omit =
+    tests
+    tests/*
+    */test_*
+    .tox/*
+
+[coverage:report]
+show_missing = True
+
+[flake8]
+exclude =
+    .tox
diff --git a/RO/osm_ro/wim/wan_link_actions.py b/RO/osm_ro/wim/wan_link_actions.py
new file mode 100644 (file)
index 0000000..0d878b2
--- /dev/null
@@ -0,0 +1,440 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+# pylint: disable=E1101,E0203,W0201
+import json
+from pprint import pformat
+from sys import exc_info
+from time import time
+
+from ..utils import filter_dict_keys as filter_keys
+from ..utils import merge_dicts, remove_none_items, safe_get, truncate
+from .actions import CreateAction, DeleteAction, FindAction
+from .errors import (
+    InconsistentState,
+    NoRecordFound,
+    NoExternalPortFound
+)
+from .wimconn import WimConnectorError
+
+INSTANCE_NET_STATUS_ERROR = ('DOWN', 'ERROR', 'VIM_ERROR',
+                             'DELETED', 'SCHEDULED_DELETION')
+INSTANCE_NET_STATUS_PENDING = ('BUILD', 'INACTIVE', 'SCHEDULED_CREATION')
+INSTANCE_VM_STATUS_ERROR = ('ERROR', 'VIM_ERROR',
+                            'DELETED', 'SCHEDULED_DELETION')
+
+
+class RefreshMixin(object):
+    def refresh(self, connector, persistence):
+        """Ask the external WAN Infrastructure Manager system for updates on
+        the status of the task.
+
+        Arguments:
+            connector: object with API for accessing the WAN
+                Infrastructure Manager system
+            persistence: abstraction layer for the database
+
+        Returns:
+            dict: connector status info with ``wim_status`` removed and the
+                remaining fields (``wim_info``, ``error_msg``) kept.
+        """
+        fields = ('wim_status', 'wim_info', 'error_msg')
+        result = dict.fromkeys(fields)
+
+        try:
+            result.update(
+                connector
+                .get_connectivity_service_status(self.wim_internal_id))
+        except WimConnectorError as ex:
+            # Record the failure instead of propagating: the task status is
+            # persisted below either way
+            self.logger.exception(ex)
+            result.update(wim_status='WIM_ERROR', error_msg=truncate(ex))
+
+        # Drop any extra keys the connector may have returned
+        result = filter_keys(result, fields)
+
+        action_changes = remove_none_items({
+            'extra': merge_dicts(self.extra, result),
+            'status': 'BUILD' if result['wim_status'] == 'BUILD' else None,
+            'error_msg': result['error_msg'],
+            'modified_at': time()})
+        # NOTE: `pop` mutates `result`; `action_changes` (above) must be
+        # built before this line so it still sees `wim_status`
+        link_changes = merge_dicts(result, status=result.pop('wim_status'))
+        # ^  Rename field: wim_status => status
+
+        persistence.update_wan_link(self.item_id,
+                                    remove_none_items(link_changes))
+
+        self.save(persistence, **action_changes)
+
+        return result
+
+
+class WanLinkCreate(RefreshMixin, CreateAction):
+    def fail(self, persistence, reason, status='FAILED'):
+        changes = {'status': 'ERROR', 'error_msg': truncate(reason)}
+        persistence.update_wan_link(self.item_id, changes)
+        return super(WanLinkCreate, self).fail(persistence, reason, status)
+
+    def process(self, connector, persistence, ovim):
+        """Process the current task.
+        First we check if all the dependencies are ready,
+        then we call ``execute`` to actually execute the action.
+
+        Arguments:
+            connector: object with API for accessing the WAN
+                Infrastructure Manager system
+            persistence: abstraction layer for the database
+            ovim: instance of openvim, abstraction layer that enable
+                SDN-related operations
+        """
+        wan_link = persistence.get_by_uuid('instance_wim_nets', self.item_id)
+
+        # First we check if all the dependencies are solved
+        instance_nets = persistence.get_instance_nets(
+            wan_link['instance_scenario_id'], wan_link['sce_net_id'])
+
+        try:
+            dependency_statuses = [n['status'] for n in instance_nets]
+        except KeyError:
+            self.logger.debug('`status` not found in\n\n%s\n\n',
+                              json.dumps(instance_nets, indent=4))
+        errored = [instance_nets[i]
+                   for i, status in enumerate(dependency_statuses)
+                   if status in INSTANCE_NET_STATUS_ERROR]
+        if errored:
+            return self.fail(
+                persistence,
+                'Impossible to stablish WAN connectivity due to an issue '
+                'with the local networks:\n\t' +
+                '\n\t'.join('{uuid}: {status}'.format(**n) for n in errored))
+
+        pending = [instance_nets[i]
+                   for i, status in enumerate(dependency_statuses)
+                   if status in INSTANCE_NET_STATUS_PENDING]
+        if pending:
+            return self.defer(
+                persistence,
+                'Still waiting for the local networks to be active:\n\t' +
+                '\n\t'.join('{uuid}: {status}'.format(**n) for n in pending))
+
+        return self.execute(connector, persistence, ovim, instance_nets)
+
+    def _get_connection_point_info(self, persistence, ovim, instance_net):
+        """Retrieve information about the connection PoP <> WAN
+
+        Arguments:
+            persistence: object that encapsulates persistence logic
+                (e.g. db connection)
+            ovim: object that encapsulates network management logic (openvim)
+            instance_net: record with the information about a local network
+                (inside a VIM). This network will be connected via a WAN link
+                to a different network in a distinct VIM.
+                This method is used to trace what would be the way this network
+                can be accessed from the outside world.
+
+        Returns:
+            dict: Record representing the wan_port_mapping associated to the
+                  given instance_net. The expected fields are:
+                  **wim_id**, **datacenter_id**, **pop_switch_dpid** (the local
+                  network is expected to be connected at this switch),
+                  **pop_switch_port**, **wan_service_endpoint_id**,
+                  **wan_service_mapping_info**.
+        """
+        # First, we need to find a route from the datacenter to the outside
+        # world. For that, we can use the rules given in the datacenter
+        # configuration:
+        datacenter_id = instance_net['datacenter_id']
+        datacenter = persistence.get_datacenter_by(datacenter_id)
+        rules = safe_get(datacenter, 'config.external_connections', {}) or {}
+        vim_info = instance_net.get('vim_info', {}) or {}
+        # Alternatively, we can look for it, using the SDN assist
+        external_port = (self._evaluate_rules(rules, vim_info) or
+                         self._get_port_sdn(ovim, instance_net))
+
+        if not external_port:
+            raise NoExternalPortFound(instance_net)
+
+        # Then, we find the WAN switch that is connected to this external port
+        try:
+            wim_account = persistence.get_wim_account_by(
+                uuid=self.wim_account_id)
+
+            criteria = {
+                'wim_id': wim_account['wim_id'],
+                'pop_switch_dpid': external_port[0],
+                'pop_switch_port': external_port[1],
+                'datacenter_id': datacenter_id}
+
+            wan_port_mapping = persistence.query_one(
+                FROM='wim_port_mappings',
+                WHERE=criteria)
+        except NoRecordFound as e:
+            ex = InconsistentState('No WIM port mapping found:'
+                                   'wim_account: {}\ncriteria:\n{}'.format(
+                                       self.wim_account_id, pformat(criteria)))
+            raise ex from e
+
+        # It is important to return encapsulation information if present
+        mapping = merge_dicts(
+            wan_port_mapping.get('wan_service_mapping_info'),
+            filter_keys(vim_info, ('encapsulation_type', 'encapsulation_id'))
+        )
+
+        return merge_dicts(wan_port_mapping, wan_service_mapping_info=mapping)
+
+    def _get_port_sdn(self, ovim, instance_net):
+        criteria = {'net_id': instance_net['sdn_net_id']}
+        try:
+            local_port_mapping = ovim.get_ports(filter=criteria)
+
+            if local_port_mapping:
+                return (local_port_mapping[0]['switch_dpid'],
+                        local_port_mapping[0]['switch_port'])
+        except:  # noqa
+            self.logger.exception('Problems when calling OpenVIM')
+
+        self.logger.debug('No ports found using criteria:\n%r\n.', criteria)
+        return None
+
+    def _evaluate_rules(self, rules, vim_info):
+        """Given a ``vim_info`` dict from a ``instance_net`` record, evaluate
+        the set of rules provided during the VIM/datacenter registration to
+        determine an external port used to connect that VIM/datacenter to
+        other ones where different parts of the NS will be instantiated.
+
+        For example, considering a VIM/datacenter is registered like the
+        following::
+
+            vim_record = {
+              "uuid": ...
+              ...  # Other properties associated with the VIM/datacenter
+              "config": {
+                ...  # Other configuration
+                "external_connections": [
+                  {
+                    "condition": {
+                      "provider:physical_network": "provider_net1",
+                      ...  # This method will look up all the keys listed here
+                           # in the instance_nets.vim_info dict and compare the
+                           # values. When all the values match, the associated
+                           # vim_external_port will be selected.
+                    },
+                    "vim_external_port": {"switch": "switchA", "port": "portB"}
+                  },
+                  ...  # The user can provide as many rules as needed, however
+                       # only the first one to match will be applied.
+                ]
+              }
+            }
+
+        When an ``instance_net`` record is instantiated in that datacenter with
+        the following information::
+
+            instance_net = {
+              "uuid": ...
+              ...
+              "vim_info": {
+                ...
+                "provider_physical_network": "provider_net1",
+              }
+            }
+
+        Then, ``switchA`` and ``portB`` will be used to stablish the WAN
+        connection.
+
+        Arguments:
+            rules (list): Set of dicts containing the keys ``condition`` and
+                ``vim_external_port``. This list should be extracted from
+                ``vim['config']['external_connections']`` (as stored in the
+                database).
+            vim_info (dict): Information given by the VIM Connector, against
+               which the rules will be evaluated.
+
+        Returns:
+            tuple: switch id (local datacenter switch) and port or None if
+                the rule does not match.
+        """
+        # First matching rule wins; `{}` fallback makes the check below fail
+        # gracefully when no rule matches
+        rule = next((r for r in rules if self._evaluate_rule(r, vim_info)), {})
+        if 'vim_external_port' not in rule:
+            self.logger.debug('No external port found.\n'
+                              'rules:\n%r\nvim_info:\n%r\n\n', rules, vim_info)
+            return None
+
+        return (rule['vim_external_port']['switch'],
+                rule['vim_external_port']['port'])
+
+    @staticmethod
+    def _evaluate_rule(rule, vim_info):
+        """Evaluate the conditions from a single rule to ``vim_info`` and
+        determine if the rule should be applicable or not.
+
+        Please check :obj:`~._evaluate_rules` for more information.
+
+        Arguments:
+            rule (dict): Data structure containing the keys ``condition`` and
+                ``vim_external_port``. This should be one of the elements in
+                ``vim['config']['external_connections']`` (as stored in the
+                database).
+            vim_info (dict): Information given by the VIM Connector, against
+               which the rules will be evaluated.
+
+        Returns:
+            True or False: If all the conditions are met.
+        """
+        condition = rule.get('condition', {}) or {}
+        return all(safe_get(vim_info, k) == v for k, v in condition.items())
+
+    @staticmethod
+    def _derive_connection_point(wan_info):
+        point = {'service_endpoint_id': wan_info['wan_service_endpoint_id']}
+        # TODO: Cover other scenarios, e.g. VXLAN.
+        details = wan_info.get('wan_service_mapping_info', {})
+        if details.get('encapsulation_type') == 'vlan':
+            point['service_endpoint_encapsulation_type'] = 'dot1q'
+            point['service_endpoint_encapsulation_info'] = {
+                'vlan': details['encapsulation_id']
+            }
+        else:
+            point['service_endpoint_encapsulation_type'] = 'none'
+        return point
+
+    @staticmethod
+    def _derive_service_type(connection_points):
+        # TODO: add multipoint and L3 connectivity.
+        if len(connection_points) == 2:
+            return 'ELINE'
+        else:
+            raise NotImplementedError('Multipoint connectivity is not '
+                                      'supported yet.')
+
+    def _update_persistent_data(self, persistence, service_uuid, conn_info):
+        """Store plugin/connector specific information in the database
+
+        Arguments:
+            persistence: abstraction layer for the database
+            service_uuid: identifier of the connectivity service assigned by
+                the WIM connector
+            conn_info: opaque connector-specific data, persisted so it can be
+                handed back to the connector on deletion
+        """
+        persistence.update_wan_link(self.item_id, {
+            'wim_internal_id': service_uuid,
+            'wim_info': {'conn_info': conn_info},
+            'status': 'BUILD'})
+
+    def execute(self, connector, persistence, ovim, instance_nets):
+        """Actually execute the action, since now we are sure all the
+        dependencies are solved
+        """
+        try:
+            # `wan_info` is a lazy generator: _get_connection_point_info is
+            # only invoked inside the list comprehension below, so a
+            # NoExternalPortFound/InconsistentState raised there is still
+            # caught by this try block
+            wan_info = (self._get_connection_point_info(persistence, ovim, net)
+                        for net in instance_nets)
+            connection_points = [self._derive_connection_point(w)
+                                 for w in wan_info]
+
+            uuid, info = connector.create_connectivity_service(
+                self._derive_service_type(connection_points),
+                connection_points
+                # TODO: other properties, e.g. bandwidth
+            )
+        except (WimConnectorError, InconsistentState,
+                NoExternalPortFound) as ex:
+            self.logger.exception(ex)
+            return self.fail(
+                persistence,
+                'Impossible to stablish WAN connectivity.\n\t{}'.format(ex))
+
+        self.logger.debug('WAN connectivity established %s\n%s\n',
+                          uuid, json.dumps(info, indent=4))
+        self.wim_internal_id = uuid
+        self._update_persistent_data(persistence, uuid, info)
+        self.succeed(persistence)
+        return uuid
+
+
+class WanLinkDelete(DeleteAction):
+    def succeed(self, persistence):
+        try:
+            persistence.update_wan_link(self.item_id, {'status': 'DELETED'})
+        except NoRecordFound:
+            self.logger.debug('%s(%s) record already deleted',
+                              self.item, self.item_id)
+
+        return super(WanLinkDelete, self).succeed(persistence)
+
+    def get_wan_link(self, persistence):
+        """Retrieve information about the wan_link
+
+        It might be cached, or arrive from the database
+        """
+        if self.extra.get('wan_link'):
+            # First try a cached version of the data
+            return self.extra['wan_link']
+
+        return persistence.get_by_uuid(
+            'instance_wim_nets', self.item_id)
+
+    def process(self, connector, persistence, ovim):
+        """Delete a WAN link previously created"""
+        wan_link = self.get_wan_link(persistence)
+        if 'ERROR' in (wan_link.get('status') or ''):
+            return self.fail(
+                persistence,
+                'Impossible to delete WAN connectivity, '
+                'it was never successfully established:'
+                '\n\t{}'.format(wan_link['error_msg']))
+
+        internal_id = wan_link.get('wim_internal_id') or self.internal_id
+
+        if not internal_id:
+            self.logger.debug('No wim_internal_id found in\n%s\n%s\n'
+                              'Assuming no network was created yet, '
+                              'so no network have to be deleted.',
+                              json.dumps(wan_link, indent=4),
+                              json.dumps(self.as_dict(), indent=4))
+            return self.succeed(persistence)
+
+        try:
+            id = self.wim_internal_id
+            conn_info = safe_get(wan_link, 'wim_info.conn_info')
+            self.logger.debug('Connection Service %s (wan_link: %s):\n%s\n',
+                              id, wan_link['uuid'],
+                              json.dumps(conn_info, indent=4))
+            result = connector.delete_connectivity_service(id, conn_info)
+        except (WimConnectorError, InconsistentState) as ex:
+            self.logger.exception(ex)
+            return self.fail(
+                persistence,
+                'Impossible to delete WAN connectivity.\n\t{}'.format(ex))
+
+        self.logger.debug('WAN connectivity removed %s', result)
+        self.succeed(persistence)
+
+        return result
+
+
+class WanLinkFind(RefreshMixin, FindAction):
+    # FIND actions only need the status-refreshing behavior from RefreshMixin
+    pass
+
+
+# Dispatch table: maps the `action` field of a vim_wim_actions record to the
+# concrete Action class (consumed by `action_from` in wim_thread.py)
+ACTIONS = {
+    'CREATE': WanLinkCreate,
+    'DELETE': WanLinkDelete,
+    'FIND': WanLinkFind,
+}
diff --git a/RO/osm_ro/wim/wim_thread.py b/RO/osm_ro/wim/wim_thread.py
new file mode 100644 (file)
index 0000000..13502b9
--- /dev/null
@@ -0,0 +1,441 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+
+"""
+Thread-based interaction with WIMs. Tasks are stored in the
+database (vim_wim_actions table) and processed sequentially
+
+Please check the Action class for information about the content of each action.
+"""
+
+import logging
+import threading
+from contextlib import contextmanager
+from functools import partial
+from itertools import islice, chain, takewhile
+from operator import itemgetter, attrgetter
+from sys import exc_info
+from time import time, sleep
+
+import queue
+
+from . import wan_link_actions
+from ..utils import ensure, partition, pipe
+from .actions import IGNORE, PENDING, REFRESH
+from .errors import (
+    DbBaseException,
+    QueueFull,
+    InvalidParameters as Invalid,
+    UndefinedAction,
+)
+from .failing_connector import FailingConnector
+from .wimconn import WimConnectorError
+from .wimconn_dynpac import DynpacConnector
+from .wimconn_fake import FakeConnector
+from .wimconn_ietfl2vpn import WimconnectorIETFL2VPN
+
+ACTIONS = {
+    'instance_wim_nets': wan_link_actions.ACTIONS
+}
+
+CONNECTORS = {
+    # "odl": wimconn_odl.OdlConnector,
+    "dynpac": DynpacConnector,
+    "fake": FakeConnector,
+    "tapi": WimconnectorIETFL2VPN,
+    # Add extra connectors here
+}
+
+
+class WimThread(threading.Thread):
+    """Specialized task queue implementation that runs in an isolated thread.
+
+    Objects of this class have a few methods that are intended to be used
+    outside of the thread:
+
+    - start
+    - insert_task
+    - reload
+    - exit
+
+    All the other methods are used internally to manipulate/process the task
+    queue.
+    """
+    RETRY_SCHEDULED = 10  # 10 seconds
+    REFRESH_BUILD = 10    # 10 seconds
+    REFRESH_ACTIVE = 60   # 1 minute
+    BATCH = 10            # 10 actions per round
+    QUEUE_SIZE = 2000
+    RECOVERY_TIME = 5     # Sleep 5s to leave the system some time to recover
+    MAX_RECOVERY_TIME = 180
+    WAITING_TIME = 1      # Wait 1s for taks to arrive, when there are none
+
+    def __init__(self, persistence, wim_account, logger=None, ovim=None):
+        """Init a thread.
+
+        Arguments:
+            persistence: Database abstraction layer
+            wim_account: Record containing wim_account, tenant and wim
+                information.
+            logger: optional logger; when None, a child of 'openmano.wim'
+                named after this thread is created
+            ovim: instance of openvim used for SDN-related operations
+        """
+        name = '{}.{}.{}'.format(wim_account['wim']['name'],
+                                 wim_account['name'], wim_account['uuid'])
+        super(WimThread, self).__init__(name=name)
+
+        self.name = name
+        self.connector = None
+        self.wim_account = wim_account
+
+        self.logger = logger or logging.getLogger('openmano.wim.'+self.name)
+        self.persist = persistence
+        self.ovim = ovim
+
+        # Bounded channel used by other threads to send messages/tasks
+        # (see ``insert_task``)
+        self.task_queue = queue.Queue(self.QUEUE_SIZE)
+
+        self.refresh_tasks = []
+        """Time ordered task list for refreshing the status of WIM nets"""
+
+        self.pending_tasks = []
+        """Time ordered task list for creation, deletion of WIM nets"""
+
+        self.grouped_tasks = {}
+        """ It contains all the creation/deletion pending tasks grouped by
+        its concrete vm, net, etc
+
+            <item><item_id>:
+                -   <task1>  # e.g. CREATE task
+                    <task2>  # e.g. DELETE task
+        """
+
+        self._insert_task = {
+            PENDING: partial(self.schedule, list_name='pending'),
+            REFRESH: partial(self.schedule, list_name='refresh'),
+            IGNORE: lambda task, *_, **__: task.save(self.persist)}
+        """Send the task to the right processing queue"""
+
+    def on_start(self):
+        """Run a series of procedures every time the thread (re)starts"""
+        # Re-instantiate the connector (credentials/mappings may have
+        # changed) and rebuild the in-memory task lists from the database
+        self.connector = self.get_connector()
+        self.reload_actions()
+
+    def get_connector(self):
+        """Create an WimConnector instance according to the wim.type"""
+        error_msg = ''
+        account_id = self.wim_account['uuid']
+        try:
+            account = self.persist.get_wim_account_by(
+                uuid=account_id, hide=None)  # Credentials need to be available
+            wim = account['wim']
+            mapping = self.persist.query('wim_port_mappings',
+                                         WHERE={'wim_id': wim['uuid']},
+                                         error_if_none=False)
+            return CONNECTORS[wim['type']](wim, account, {
+                'service_endpoint_mapping': mapping or []
+            })
+        except DbBaseException as ex:
+            error_msg = ('Error when retrieving WIM account ({})\n'
+                         .format(account_id)) + str(ex)
+            self.logger.error(error_msg, exc_info=True)
+        except KeyError as ex:
+            error_msg = ('Unable to find the WIM connector for WIM ({})\n'
+                         .format(wim['type'])) + str(ex)
+            self.logger.error(error_msg, exc_info=True)
+        except (WimConnectorError, Exception) as ex:
+            # TODO: Remove the Exception class here when the connector class is
+            # ready
+            error_msg = ('Error when loading WIM connector for WIM ({})\n'
+                         .format(wim['type'])) + str(ex)
+            self.logger.error(error_msg, exc_info=True)
+
+        error_msg_extra = ('Any task targeting WIM account {} ({}) will fail.'
+                           .format(account_id, self.wim_account.get('name')))
+        self.logger.warning(error_msg_extra)
+        return FailingConnector(error_msg + '\n' + error_msg_extra)
+
+    @contextmanager
+    def avoid_exceptions(self):
+        """Make a real effort to keep the thread alive, by avoiding the
+        exceptions. They are instead logged as a critical errors.
+
+        NOTE: the broad ``except Exception`` is deliberate -- this context
+        manager wraps the thread main loop, which must never die because of
+        a single failing task.
+        """
+        try:
+            yield
+        except Exception as ex:
+            self.logger.critical("Unexpected exception %s", ex, exc_info=True)
+            # Leave the system some time to recover before continuing
+            sleep(self.RECOVERY_TIME)
+
+    def reload_actions(self, group_limit=100):
+        """Read actions from database and reload them at memory.
+
+        This method will clean and reload the attributes ``refresh_tasks``,
+        ``pending_tasks`` and ``grouped_tasks``
+
+        Attributes:
+            group_limit (int): maximum number of action groups (those that
+                refer to the same ``<item, item_id>``) to be retrieved from the
+                database in each batch.
+        """
+
+        # First we clean the cache to let the garbage collector work
+        self.refresh_tasks = []
+        self.pending_tasks = []
+        self.grouped_tasks = {}
+
+        offset = 0
+
+        while True:
+            # Do things in batches
+            task_groups = self.persist.get_actions_in_groups(
+                self.wim_account['uuid'], item_types=('instance_wim_nets',),
+                group_offset=offset, group_limit=group_limit)
+            offset += (group_limit - 1)  # Update for the next batch
+
+            if not task_groups:
+                break
+
+            pending_groups = (g for _, g in task_groups if is_pending_group(g))
+
+            for task_list in pending_groups:
+                with self.avoid_exceptions():
+                    self.insert_pending_tasks(filter_pending_tasks(task_list))
+
+            self.logger.debug(
+                'Reloaded wim actions pending: %d refresh: %d',
+                len(self.pending_tasks), len(self.refresh_tasks))
+
+    def insert_pending_tasks(self, task_list):
+        """Insert task in the list of actions being processed
+
+        Arguments:
+            task_list: iterable of task records (dicts) as stored in the
+                database; each is converted into an Action object via
+                ``action_from``
+        """
+        task_list = [action_from(task, self.logger) for task in task_list]
+
+        for task in task_list:
+            group = task.group_key
+            self.grouped_tasks.setdefault(group, [])
+            # Each task can try to supersede the other ones,
+            # but just DELETE actions will actually do
+            task.supersede(self.grouped_tasks[group])
+            self.grouped_tasks[group].append(task)
+
+        # We need a separate loop so each task can check all the other
+        # ones before deciding
+        for task in task_list:
+            self._insert_task[task.processing](task)
+            self.logger.debug('Insert WIM task: %s (%s): %s %s',
+                              task.id, task.status, task.action, task.item)
+
+    def schedule(self, task, when=None, list_name='pending'):
+        """Insert a task in the correct list, respecting the schedule.
+        The refreshing list is ordered by threshold_time (task.process_at)
+        It is assumed that this is called inside this thread
+
+        Arguments:
+            task (Action): object representing the task.
+                This object must implement the ``process`` method and inherit
+                from the ``Action`` class
+            list_name: either 'refresh' or 'pending'
+            when (float): unix time in seconds since as a float number
+        """
+        processing_list = {'refresh': self.refresh_tasks,
+                           'pending': self.pending_tasks}[list_name]
+
+        when = when or time()
+        task.process_at = when
+
+        schedule = (t.process_at for t in processing_list)
+        index = len(list(takewhile(lambda moment: moment <= when, schedule)))
+
+        processing_list.insert(index, task)
+        self.logger.debug(
+            'Schedule of %s in "%s" - waiting position: %d (%f)',
+            task.id, list_name, index, task.process_at)
+
+        return task
+
+    def process_list(self, list_name='pending'):
+        """Process actions in batches and reschedule them if necessary
+
+        Returns:
+            int: number of tasks removed from the list (processed ones plus
+                superseded ones; superseded tasks do not count against the
+                BATCH limit, they are only saved to the database).
+        """
+        task_list, handler = {
+            'refresh': (self.refresh_tasks, self._refresh_single),
+            'pending': (self.pending_tasks, self._process_single)}[list_name]
+
+        now = time()
+        # Only consider tasks whose scheduled time has arrived
+        waiting = ((i, task) for i, task in enumerate(task_list)
+                   if task.process_at is None or task.process_at <= now)
+
+        is_superseded = pipe(itemgetter(1), attrgetter('is_superseded'))
+        superseded, active = partition(is_superseded, waiting)
+        superseded = [(i, t.save(self.persist)) for i, t in superseded]
+
+        batch = islice(active, self.BATCH)
+        refreshed = [(i, handler(t)) for i, t in batch]
+
+        # Since pop changes the indexes in the list, we need to do it backwards
+        remove = sorted([i for i, _ in chain(refreshed, superseded)])
+        return len([task_list.pop(i) for i in reversed(remove)])
+
+    def _refresh_single(self, task):
+        """Refresh just a single task, and reschedule it if necessary
+
+        Returns:
+            The result of ``task.refresh`` (status info from the connector).
+        """
+        now = time()
+
+        result = task.refresh(self.connector, self.persist)
+        self.logger.debug('Refreshing WIM task: %s (%s): %s %s => %r',
+                          task.id, task.status, task.action, task.item, result)
+
+        # Links still being built are polled more frequently
+        interval = self.REFRESH_BUILD if task.is_build else self.REFRESH_ACTIVE
+        self.schedule(task, now + interval, 'refresh')
+
+        return result
+
+    def _process_single(self, task):
+        """Process just a single task, and reschedule it if necessary
+
+        Returns:
+            The result of ``task.process``.
+        """
+        now = time()
+
+        result = task.process(self.connector, self.persist, self.ovim)
+        self.logger.debug('Executing WIM task: %s (%s): %s %s => %r',
+                          task.id, task.status, task.action, task.item, result)
+
+        if task.action == 'DELETE':
+            # NOTE(review): assumes the group is always present -- a missing
+            # key would raise KeyError here; confirm DELETE tasks always go
+            # through insert_pending_tasks first
+            del self.grouped_tasks[task.group_key]
+
+        # Re-queue (or ignore/save) according to the task's next state
+        self._insert_task[task.processing](task, now + self.RETRY_SCHEDULED)
+
+        return result
+
+    def insert_task(self, task):
+        """Send a message to the running thread
+
+        This function is supposed to be called outside of the WIM Thread.
+
+        Arguments:
+            task (str or dict): `"exit"`, `"reload"` or dict representing a
+                task. For more information about the fields in task, please
+                check the Action class.
+        """
+        try:
+            self.task_queue.put(task, False)
+            return None
+        except queue.Full as e:
+            ex = QueueFull(self.name)
+            raise ex from e
+
+    def reload(self):
+        """Send a message to the running thread to reload itself"""
+        # Handled as the 'reload' string message in ``run``
+        self.insert_task('reload')
+
+    def exit(self):
+        """Send a message to the running thread to kill itself"""
+        # Handled as the 'exit' string message in ``run``
+        self.insert_task('exit')
+
+    def run(self):
+        """Thread main loop: drain the message queue, process the pending
+        and refresh task lists, and restart (``on_start``) after a 'reload'
+        message or when the connector is failing.
+        """
+        self.logger.debug('Starting: %s', self.name)
+        recovery_time = 0
+        while True:
+            self.on_start()
+            reload_thread = False
+            self.logger.debug('Reloaded: %s', self.name)
+
+            while True:
+                with self.avoid_exceptions():
+                    while not self.task_queue.empty():
+                        task = self.task_queue.get()
+                        if isinstance(task, dict):
+                            self.insert_pending_tasks([task])
+                        elif isinstance(task, list):
+                            self.insert_pending_tasks(task)
+                        elif isinstance(task, str):
+                            if task == 'exit':
+                                # NOTE(review): returns/breaks without calling
+                                # task_queue.task_done() -- confirm no caller
+                                # relies on task_queue.join()
+                                self.logger.debug('Finishing: %s', self.name)
+                                return 0
+                            elif task == 'reload':
+                                reload_thread = True
+                                break
+                        self.task_queue.task_done()
+
+                    if reload_thread:
+                        # Go back to the outer loop, which calls on_start()
+                        break
+
+                    # Idle wait when neither list had anything to process
+                    if not(self.process_list('pending') +
+                           self.process_list('refresh')):
+                        sleep(self.WAITING_TIME)
+
+                    if isinstance(self.connector, FailingConnector):
+                        # Wait sometime to try instantiating the connector
+                        # again and restart
+                        # Increase the recovery time if restarting is not
+                        # working (up to a limit)
+                        recovery_time = min(self.MAX_RECOVERY_TIME,
+                                            recovery_time + self.RECOVERY_TIME)
+                        sleep(recovery_time)
+                        break
+                    else:
+                        recovery_time = 0
+
+        # NOTE(review): unreachable -- the outer loop only exits via the
+        # `return 0` above
+        self.logger.debug("Finishing")
+
+
+def is_pending_group(group):
+    return all(task['action'] != 'DELETE' or
+               task['status'] == 'SCHEDULED'
+               for task in group)
+
+
+def filter_pending_tasks(group):
+    return (t for t in group
+            if (t['status'] == 'SCHEDULED' or
+                t['action'] in ('CREATE', 'FIND')))
+
+
+def action_from(record, logger=None, mapping=ACTIONS):
+    """Create an Action object from a action record (dict)
+
+    Arguments:
+        mapping (dict): Nested data structure that maps the relationship
+            between action properties and object constructors.  This data
+            structure should be a dict with 2 levels of keys: item type and
+            action type. Example::
+                {'wan_link':
+                    {'CREATE': WanLinkCreate}
+                    ...}
+                ...}
+        record (dict): action information
+
+    Return:
+        (Action.Base): Object representing the action
+    """
+    ensure('item' in record, Invalid('`record` should contain "item"'))
+    ensure('action' in record, Invalid('`record` should contain "action"'))
+
+    try:
+        factory = mapping[record['item']][record['action']]
+        return factory(record, logger=logger)
+    except KeyError as e:
+        ex = UndefinedAction(record['item'], record['action'])
+        raise ex from e
diff --git a/RO/osm_ro/wim/wimconn.py b/RO/osm_ro/wim/wimconn.py
new file mode 100644 (file)
index 0000000..92b6db0
--- /dev/null
@@ -0,0 +1,236 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+"""The WIM connector is responsible for establishing wide area network
+connectivity.
+
+It receives information from the WimThread/WAN Actions about the endpoints of
+a link that spans across multiple datacenters and stablish a path between them.
+"""
+import logging
+
+from ..http_tools.errors import HttpMappedError
+
+
class WimConnectorError(HttpMappedError):
    """Base Exception for all connector related errors.

    Subclasses HttpMappedError, so it is raised with an ``http_code``
    keyword (see usages below) — presumably mapped to the HTTP response
    status by the http_tools layer; confirm in ``http_tools.errors``.
    """
+
+
class WimConnector(object):
    """Abstract base class for all the WIM connectors

    Arguments:
        wim (dict): WIM record, as stored in the database
        wim_account (dict): WIM account record, as stored in the database
        config (dict): optional persistent information related to an specific
            connector.  Inside this dict, a special key,
            ``service_endpoint_mapping`` provides the internal endpoint
            mapping.
        logger (logging.Logger): optional logger object. If none is passed
            ``openmano.wim.wimconn`` is used.

    The arguments of the constructor are converted to object attributes.
    An extra property, ``service_endpoint_mapping`` is created from ``config``.
    """
    def __init__(self, wim, wim_account, config=None, logger=None):
        self.logger = logger or logging.getLogger('openmano.wim.wimconn')

        self.wim = wim
        self.wim_account = wim_account
        self.config = config or {}
        # Read from the normalized ``self.config`` (never None): the
        # original code called ``config.get`` on the raw argument, which
        # raised AttributeError whenever the default ``config=None`` was used.
        self.service_endpoint_mapping = (
            self.config.get('service_endpoint_mapping', []))

    def check_credentials(self):
        """Check if the connector itself can access the WIM.

        Raises:
            WimConnectorError: Issues regarding authorization, access to
                external URLs, etc are detected.
        """
        raise NotImplementedError

    def get_connectivity_service_status(self, service_uuid, conn_info=None):
        """Monitor the status of the connectivity service established

        Arguments:
            service_uuid (str): UUID of the connectivity service
            conn_info (dict or None): Information returned by the connector
                during the service creation/edition and subsequently stored in
                the database.

        Returns:
            dict: JSON/YAML-serializable dict that contains a mandatory key
                ``wim_status`` associated with one of the following values::

                    {'wim_status': 'ACTIVE'}
                        # The service is up and running.

                    {'wim_status': 'INACTIVE'}
                        # The service was created, but the connector
                        # cannot determine yet if connectivity exists
                        # (ideally, the caller needs to wait and check again).

                    {'wim_status': 'DOWN'}
                        # Connection was previously established,
                        # but an error/failure was detected.

                    {'wim_status': 'ERROR'}
                        # An error occurred when trying to create the service/
                        # establish the connectivity.

                    {'wim_status': 'BUILD'}
                        # Still trying to create the service, the caller
                        # needs to wait and check again.

                Additionally ``error_msg``(**str**) and ``wim_info``(**dict**)
                keys can be used to provide additional status explanation or
                new information available for the connectivity service.
        """
        raise NotImplementedError

    def create_connectivity_service(self, service_type, connection_points,
                                    **kwargs):
        """Stablish WAN connectivity between the endpoints

        Arguments:
            service_type (str): ``ELINE`` (L2), ``ELAN`` (L2), ``ETREE`` (L2),
                ``L3``.
            connection_points (list): each point corresponds to
                an entry point from the DC to the transport network. One
                connection point serves to identify the specific access and
                some other service parameters, such as encapsulation type.
                Represented by a dict as follows::

                    {
                      "service_endpoint_id": ..., (str[uuid])
                      "service_endpoint_encapsulation_type": ...,
                           (enum: none, dot1q, ...)
                      "service_endpoint_encapsulation_info": {
                        ... (dict)
                        "vlan": ..., (int, present if encapsulation is dot1q)
                        "vni": ... (int, present if encapsulation is vxlan),
                        "peers": [(ipv4_1), (ipv4_2)]
                            (present if encapsulation is vxlan)
                      }
                    }

              The service endpoint ID should be previously informed to the WIM
              engine in the RO when the WIM port mapping is registered.

        Keyword Arguments:
            bandwidth (int): value in kilobytes
            latency (int): value in milliseconds

        Other QoS might be passed as keyword arguments.

        Returns:
            tuple: ``(service_id, conn_info)`` containing:
               - *service_uuid* (str): UUID of the established connectivity
                  service
               - *conn_info* (dict or None): Information to be stored at the
                 database (or ``None``). This information will be provided to
                 the :meth:`~.edit_connectivity_service` and :obj:`~.delete`.
                 **MUST** be JSON/YAML-serializable (plain data structures).

        Raises:
            WimConnectorException: In case of error.
        """
        raise NotImplementedError

    def delete_connectivity_service(self, service_uuid, conn_info=None):
        """Disconnect multi-site endpoints previously connected

        This method should receive as arguments both the UUID and the
        connection info dict (respectively), as returned by
        :meth:`~.create_connectivity_service` and
        :meth:`~.edit_connectivity_service`.

        Arguments:
            service_uuid (str): UUID of the connectivity service
            conn_info (dict or None): Information returned by the connector
                during the service creation and subsequently stored in the
                database.

        Raises:
            WimConnectorException: In case of error.
        """
        raise NotImplementedError

    def edit_connectivity_service(self, service_uuid, conn_info=None,
                                  connection_points=None, **kwargs):
        """Change an existing connectivity service.

        This method's arguments and return value follow the same convention as
        :meth:`~.create_connectivity_service`.

        Arguments:
            service_uuid (str): UUID of the connectivity service.
            conn_info (dict or None): Information previously stored in the
                database.
            connection_points (list): If provided, the old list of connection
                points will be replaced.

        Returns:
            dict or None: Information to be updated and stored at the
                database.
                When ``None`` is returned, no information should be changed.
                When an empty dict is returned, the database record will be
                deleted.
                **MUST** be JSON/YAML-serializable (plain data structures).

        Raises:
            WimConnectorException: In case of error.
        """
        raise NotImplementedError

    def clear_all_connectivity_services(self):
        """Delete all WAN Links in a WIM.

        This method is intended for debugging only, and should delete all the
        connections controlled by the WIM, not only the WIM connections that
        a specific RO is aware of.

        Raises:
            WimConnectorException: In case of error.
        """
        raise NotImplementedError

    def get_all_active_connectivity_services(self):
        """Provide information about all active connections provisioned by a
        WIM.

        Raises:
            WimConnectorException: In case of error.
        """
        raise NotImplementedError
diff --git a/RO/osm_ro/wim/wimconn_dynpac.py b/RO/osm_ro/wim/wimconn_dynpac.py
new file mode 100644 (file)
index 0000000..661f6f6
--- /dev/null
@@ -0,0 +1,235 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2018 David García, University of the Basque Country
+# Copyright 2018 University of the Basque Country
+# This file is part of openmano
+# All Rights Reserved.
+# Contact information at http://i2t.ehu.eus
+#
+# # Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import requests
+import json
+import logging
+from enum import Enum
+
+from .wimconn import WimConnector, WimConnectorError
+
+
class WimError(Enum):
    """Human-readable error messages used by the Dynpac connector.

    Each value is a plain string.  The previous declaration ended every
    member with a trailing comma, which silently turned each value into a
    1-tuple, so logged and raised messages came out as ``('…',)``.
    The encapsulation message also said "dotq1"; the supported type is
    "dot1q".
    """
    UNREACHABLE = 'Unable to reach the WIM.'
    SERVICE_TYPE_ERROR = 'Unexpected service_type. Only "L2" is accepted.'
    CONNECTION_POINTS_SIZE = \
        'Unexpected number of connection points: 2 expected.'
    ENCAPSULATION_TYPE = \
        ('Unexpected service_endpoint_encapsulation_type. '
         'Only "dot1q" is accepted.')
    BANDWIDTH = 'Unable to get the bandwidth.'
    STATUS = 'Unable to get the status for the service.'
    DELETE = 'Unable to delete service.'
    CLEAR_ALL = 'Unable to clear all the services'
    UNKNOWN_ACTION = 'Unknown action invoked.'
    BACKUP = 'Unable to get the backup parameter.'
    UNSUPPORTED_FEATURE = "Unsupported feature"
    UNAUTHORIZED = "Failed while authenticating"
+
+
class WimAPIActions(Enum):
    """Symbolic names of the operations exposed by the WIM REST API.

    Values are plain strings; the previous declaration had trailing commas
    that accidentally made every value a 1-tuple.
    """
    CHECK_CONNECTIVITY = "CHECK_CONNECTIVITY"
    CREATE_SERVICE = "CREATE_SERVICE"
    DELETE_SERVICE = "DELETE_SERVICE"
    CLEAR_ALL = "CLEAR_ALL"
    SERVICE_STATUS = "SERVICE_STATUS"
+
+
class DynpacConnector(WimConnector):
    """WIM connector for the Dynpac WAN controller.

    Creates, queries and deletes L2 connectivity services between exactly
    two connection points through the Dynpac REST API.
    """
    __supported_service_types = ["ELINE (L2)", "ELINE"]
    __supported_encapsulation_types = ["dot1q"]
    __WIM_LOGGER = 'openmano.wimconn.dynpac'
    __ENCAPSULATION_TYPE_PARAM = "service_endpoint_encapsulation_type"
    __ENCAPSULATION_INFO_PARAM = "service_endpoint_encapsulation_info"
    __BACKUP_PARAM = "backup"
    __BANDWIDTH_PARAM = "bandwidth"
    __SERVICE_ENDPOINT_PARAM = "service_endpoint_id"
    __WAN_SERVICE_ENDPOINT_PARAM = "wan_service_endpoint_id"
    __WAN_MAPPING_INFO_PARAM = "wan_service_mapping_info"
    __SW_ID_PARAM = "wan_switch_dpid"
    __SW_PORT_PARAM = "wan_switch_port"
    __VLAN_PARAM = "vlan"

    # Public functions exposed to the Resource Orchestrator
    def __init__(self, wim, wim_account, config):
        """Keep the WIM/account records and extract URL and credentials."""
        self.logger = logging.getLogger(self.__WIM_LOGGER)
        self.__wim = wim
        self.__wim_account = wim_account
        self.__config = config
        self.__wim_url = self.__wim.get("wim_url")
        self.__user = wim_account.get("user")
        self.__passwd = wim_account.get("passwd")
        self.logger.info("Initialized.")

    def create_connectivity_service(self,
                                    service_type,
                                    connection_points,
                                    **kwargs):
        """Request a new L2 service between the two given connection points.

        Returns:
            tuple: (service uuid as returned by the WIM, None)
        """
        self.__check_service(service_type, connection_points, kwargs)

        body = self.__get_body(service_type, connection_points, kwargs)

        headers = {'Content-type': 'application/x-www-form-urlencoded'}
        endpoint = "{}/service/create".format(self.__wim_url)

        try:
            response = requests.post(endpoint, data=body, headers=headers)
        except requests.exceptions.RequestException as e:
            # python3: exceptions have no ``.message`` attribute; use str(e)
            self.__exception(str(e), http_code=503)

        if response.status_code != 200:
            error = json.loads(response.content)
            reason = "Reason: {}. ".format(error.get("code"))
            description = "Description: {}.".format(error.get("description"))
            exception = reason + description
            self.__exception(exception, http_code=response.status_code)
        uuid = response.content
        self.logger.info("Service with uuid {} created.".format(uuid))
        return (uuid, None)

    def edit_connectivity_service(self, service_uuid,
                                  conn_info=None, connection_points=None,
                                  **kwargs):
        """Not supported by this WIM; always raises (HTTP 501)."""
        self.__exception(WimError.UNSUPPORTED_FEATURE, http_code=501)

    def get_connectivity_service_status(self, service_uuid, conn_info=None):
        """Query the WIM for the current status of a service.

        ``conn_info`` is accepted (and ignored) to keep the signature
        compatible with the ``WimConnector`` base class.
        """
        endpoint = "{}/service/status/{}".format(self.__wim_url, service_uuid)
        try:
            response = requests.get(endpoint)
        except requests.exceptions.RequestException as e:
            self.__exception(str(e), http_code=503)

        if response.status_code != 200:
            self.__exception(WimError.STATUS, http_code=response.status_code)
        self.logger.info("Status for service with uuid {}: {}"
                         .format(service_uuid, response.content))
        return response.content

    def delete_connectivity_service(self, service_uuid, conn_info=None):
        """Delete a previously created service (``conn_info`` is unused)."""
        endpoint = "{}/service/delete/{}".format(self.__wim_url, service_uuid)
        try:
            response = requests.delete(endpoint)
        except requests.exceptions.RequestException as e:
            self.__exception(str(e), http_code=503)
        if response.status_code != 200:
            self.__exception(WimError.DELETE, http_code=response.status_code)

        self.logger.info("Service with uuid: {} deleted".format(service_uuid))

    def clear_all_connectivity_services(self):
        """Ask the WIM to remove every service it controls (debugging aid)."""
        endpoint = "{}/service/clearAll".format(self.__wim_url)
        try:
            response = requests.delete(endpoint)
            http_code = response.status_code
        except requests.exceptions.RequestException as e:
            self.__exception(str(e), http_code=503)
        if http_code != 200:
            self.__exception(WimError.CLEAR_ALL, http_code=http_code)

        self.logger.info("{} services deleted".format(response.content))
        return "{} services deleted".format(response.content)

    def check_connectivity(self):
        """Verify the WIM endpoint is reachable (no authentication)."""
        endpoint = "{}/checkConnectivity".format(self.__wim_url)

        try:
            response = requests.get(endpoint)
            http_code = response.status_code
        except requests.exceptions.RequestException as e:
            self.__exception(str(e), http_code=503)

        if http_code != 200:
            self.__exception(WimError.UNREACHABLE, http_code=http_code)
        self.logger.info("Connectivity checked")

    def check_credentials(self):
        """Verify the stored user/password are accepted by the WIM."""
        endpoint = "{}/checkCredentials".format(self.__wim_url)
        auth = (self.__user, self.__passwd)

        try:
            response = requests.get(endpoint, auth=auth)
            http_code = response.status_code
        except requests.exceptions.RequestException as e:
            self.__exception(str(e), http_code=503)

        if http_code != 200:
            self.__exception(WimError.UNAUTHORIZED, http_code=http_code)
        self.logger.info("Credentials checked")

    # Private functions
    def __exception(self, x, **kwargs):
        """Log *x* (a WimError member or message) and raise WimConnectorError."""
        http_code = kwargs.get("http_code")
        if hasattr(x, "value"):
            error = x.value
        else:
            error = x
        self.logger.error(error)
        raise WimConnectorError(error, http_code=http_code)

    def __check_service(self, service_type, connection_points, kwargs):
        """Validate service type, point count and encapsulation (HTTP 400)."""
        if service_type not in self.__supported_service_types:
            self.__exception(WimError.SERVICE_TYPE_ERROR, http_code=400)

        if len(connection_points) != 2:
            self.__exception(WimError.CONNECTION_POINTS_SIZE, http_code=400)

        for connection_point in connection_points:
            enc_type = connection_point.get(self.__ENCAPSULATION_TYPE_PARAM)
            if enc_type not in self.__supported_encapsulation_types:
                self.__exception(WimError.ENCAPSULATION_TYPE, http_code=400)

        # Commented out for as long as parameter isn't implemented
        # bandwidth = kwargs.get(self.__BANDWIDTH_PARAM)
        # if not isinstance(bandwidth, int):
            # self.__exception(WimError.BANDWIDTH, http_code=400)

        # Commented out for as long as parameter isn't implemented
        # backup = kwargs.get(self.__BACKUP_PARAM)
        # if not isinstance(backup, bool):
            # self.__exception(WimError.BACKUP, http_code=400)

    def __get_body(self, service_type, connection_points, kwargs):
        """Build the url-encoded request body for /service/create."""
        port_mapping = self.__config.get("service_endpoint_mapping")
        selected_ports = []
        for connection_point in connection_points:
            endpoint_id = connection_point.get(self.__SERVICE_ENDPOINT_PARAM)
            # python3: filter() returns an iterator, so the former
            # ``filter(...)[0]`` raised TypeError; take the first match
            port = next(p for p in port_mapping
                        if p.get(self.__WAN_SERVICE_ENDPOINT_PARAM) == endpoint_id)
            port_info = port.get(self.__WAN_MAPPING_INFO_PARAM)
            selected_ports.append(port_info)
        if service_type == "ELINE (L2)" or service_type == "ELINE":
            service_type = "L2"
        body = {
            "connection_points": [{
                "wan_switch_dpid": selected_ports[0].get(self.__SW_ID_PARAM),
                "wan_switch_port": selected_ports[0].get(self.__SW_PORT_PARAM),
                "wan_vlan": connection_points[0].get(self.__ENCAPSULATION_INFO_PARAM).get(self.__VLAN_PARAM)
            }, {
                "wan_switch_dpid": selected_ports[1].get(self.__SW_ID_PARAM),
                "wan_switch_port": selected_ports[1].get(self.__SW_PORT_PARAM),
                "wan_vlan": connection_points[1].get(self.__ENCAPSULATION_INFO_PARAM).get(self.__VLAN_PARAM)
            }],
            "bandwidth": 100,  # Hardcoded for as long as parameter isn't implemented
            "service_type": service_type,
            "backup": False    # Hardcoded for as long as parameter isn't implemented
        }
        return "body={}".format(json.dumps(body))
diff --git a/RO/osm_ro/wim/wimconn_fake.py b/RO/osm_ro/wim/wimconn_fake.py
new file mode 100644 (file)
index 0000000..36929f4
--- /dev/null
@@ -0,0 +1,137 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 Telefonica
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+This WIM does nothing and allows using it for testing and when no WIM is needed
+"""
+
+import logging
+from uuid import uuid4
+from .wimconn import WimConnector
+
+__author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
+
+
class FakeConnector(WimConnector):
    """No-op WIM connector for testing and for setups without a real WIM.

    Keeps an in-memory ``connectivity`` dict that maps service uuid to a
    fake info record, so create/get/delete/clear calls are consistent.

    Arguments:
        wim (dict): WIM record, as stored in the database
        wim_account (dict): WIM account record, as stored in the database
        config (dict): optional persistent information related to an specific
            connector.  Inside this dict, a special key,
            ``service_endpoint_mapping`` provides the internal endpoint
            mapping.
        logger (logging.Logger): optional logger object. If none is passed
            ``openmano.wim.wimconn`` is used.
    """
    def __init__(self, wim, wim_account, config=None, logger=None):
        self.logger = logging.getLogger('openmano.wimconn.fake')
        super(FakeConnector, self).__init__(wim, wim_account, config, logger)
        self.logger.debug("__init: wim='{}' wim_account='{}'".format(wim, wim_account))
        # Every other method reads/writes ``self.connectivity``; the original
        # initialized ``self.connections`` instead, so the first call to any
        # service method raised AttributeError.
        self.connectivity = {}
        self.counter = 0

    def check_credentials(self):
        """Pretend to check WIM access; never fails."""
        self.logger.debug("check_credentials")
        return None

    def get_connectivity_service_status(self, service_uuid, conn_info=None):
        """Always report the service as ACTIVE, echoing the stored info."""
        self.logger.debug("get_connectivity_service_status: service_uuid='{}' conn_info='{}'".format(service_uuid,
                                                                                                     conn_info))
        return {'wim_status': 'ACTIVE', 'wim_info': self.connectivity.get(service_uuid)}

    def create_connectivity_service(self, service_type, connection_points,
                                    **kwargs):
        """Register a fake service and return (uuid, stored info)."""
        self.logger.debug("create_connectivity_service: service_type='{}' connection_points='{}', kwargs='{}'".
                          format(service_type, connection_points, kwargs))
        _id = str(uuid4())
        self.connectivity[_id] = {"nb": self.counter}
        self.counter += 1
        return _id, self.connectivity[_id]

    def delete_connectivity_service(self, service_uuid, conn_info=None):
        """Forget a fake service; unknown uuids are silently ignored."""
        self.logger.debug("delete_connectivity_service: service_uuid='{}' conn_info='{}'".format(service_uuid,
                                                                                                 conn_info))
        self.connectivity.pop(service_uuid, None)
        return None

    def edit_connectivity_service(self, service_uuid, conn_info=None,
                                  connection_points=None, **kwargs):
        """Accept any edit without changing the stored information."""
        self.logger.debug("edit_connectivity_service: service_uuid='{}' conn_info='{}', connection_points='{}'"
                          "kwargs='{}'".format(service_uuid, conn_info, connection_points, kwargs))
        return None

    def clear_all_connectivity_services(self):
        """Drop every fake service (debugging aid)."""
        self.logger.debug("clear_all_connectivity_services")
        self.connectivity.clear()
        return None

    def get_all_active_connectivity_services(self):
        """Return the whole uuid -> info dict of fake services."""
        self.logger.debug("get_all_active_connectivity_services")
        return self.connectivity
diff --git a/RO/osm_ro/wim/wimconn_ietfl2vpn.py b/RO/osm_ro/wim/wimconn_ietfl2vpn.py
new file mode 100644 (file)
index 0000000..dc7cc97
--- /dev/null
@@ -0,0 +1,362 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 Telefonica
+# All Rights Reserved.
+#
+# Contributors: Oscar Gonzalez de Dios, Manuel Lopez Bravo, Guillermo Pajares Martin
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This work has been performed in the context of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 program.
+##
+"""The WIM connector is responsible for establishing wide area network
+connectivity.
+
+This WIM connector implements the standard IETF RFC 8466 "A YANG Data
+ Model for Layer 2 Virtual Private Network (L2VPN) Service Delivery"
+
+It receives the endpoints and the necessary details to request
+the Layer 2 service.
+"""
+import requests
+import uuid
+import logging
+from .wimconn import WimConnector, WimConnectorError
+"""TODO: check in which layer this module should be placed"""
+
+
+class WimconnectorIETFL2VPN(WimConnector):
+
+    def __init__(self, wim, wim_account, config=None, logger=None):
+        """IETF L2VPM WIM connector
+
+        Arguments: (To be completed)
+            wim (dict): WIM record, as stored in the database
+            wim_account (dict): WIM account record, as stored in the database
+        """
+        self.logger = logging.getLogger('openmano.wimconn.ietfl2vpn')
+        super(WimconnectorIETFL2VPN, self).__init__(wim, wim_account, config, logger)
+        self.headers = {'Content-Type': 'application/json'}
+        self.mappings = {m['wan_service_endpoint_id']: m
+                         for m in self.service_endpoint_mapping}
+        self.user = wim_account.get("user")
+        self.passwd = wim_account.get("passwd")
+        if self.user and self.passwd is not None:
+            self.auth = (self.user, self.passwd)
+        else:
+            self.auth = None
+        self.logger.info("IETFL2VPN Connector Initialized.")
+
+    def check_credentials(self):
+        endpoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(self.wim["wim_url"])
+        try:
+            response = requests.get(endpoint, auth=self.auth)    
+            http_code = response.status_code
+        except requests.exceptions.RequestException as e:
+            raise WimConnectorError(e.message, http_code=503)
+
+        if http_code != 200:
+            raise WimConnectorError("Failed while authenticating", http_code=http_code)
+        self.logger.info("Credentials checked")
+
+    def get_connectivity_service_status(self, service_uuid, conn_info=None):
+        """Monitor the status of the connectivity service stablished
+
+        Arguments:
+            service_uuid: Connectivity service unique identifier
+
+        Returns:
+            Examples::
+                {'wim_status': 'ACTIVE'}
+                {'wim_status': 'INACTIVE'}
+                {'wim_status': 'DOWN'}
+                {'wim_status': 'ERROR'}
+        """
+        try:
+            self.logger.info("Sending get connectivity service stuatus")
+            servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services/vpn-service={}/".format(
+                self.wim["wim_url"], service_uuid)
+            response = requests.get(servicepoint, auth=self.auth)
+            if response.status_code != requests.codes.ok:
+                raise WimConnectorError("Unable to obtain connectivity servcice status", http_code=response.status_code)
+            service_status = {'wim_status': 'ACTIVE'}
+            return service_status
+        except requests.exceptions.ConnectionError:
+            raise WimConnectorError("Request Timeout", http_code=408)
+               
+    def search_mapp(self, connection_point):
+        id = connection_point['service_endpoint_id']
+        if id not in self.mappings:         
+            raise WimConnectorError("Endpoint {} not located".format(str(id)))
+        else:
+            return self.mappings[id]
+
+    def create_connectivity_service(self, service_type, connection_points, **kwargs):
+        """Stablish WAN connectivity between the endpoints
+
+        Arguments:
+            service_type (str): ``ELINE`` (L2), ``ELAN`` (L2), ``ETREE`` (L2),
+                ``L3``.
+            connection_points (list): each point corresponds to
+                an entry point from the DC to the transport network. One
+                connection point serves to identify the specific access and
+                some other service parameters, such as encapsulation type.
+                Represented by a dict as follows::
+
+                    {
+                      "service_endpoint_id": ..., (str[uuid])
+                      "service_endpoint_encapsulation_type": ...,
+                           (enum: none, dot1q, ...)
+                      "service_endpoint_encapsulation_info": {
+                        ... (dict)
+                        "vlan": ..., (int, present if encapsulation is dot1q)
+                        "vni": ... (int, present if encapsulation is vxlan),
+                        "peers": [(ipv4_1), (ipv4_2)]
+                            (present if encapsulation is vxlan)
+                      }
+                    }
+
+              The service endpoint ID should be previously informed to the WIM
+              engine in the RO when the WIM port mapping is registered.
+
+        Keyword Arguments:
+            bandwidth (int): value in kilobytes
+            latency (int): value in milliseconds
+
+        Other QoS might be passed as keyword arguments.
+
+        Returns:
+            tuple: ``(service_id, conn_info)`` containing:
+               - *service_uuid* (str): UUID of the established connectivity
+                  service
+               - *conn_info* (dict or None): Information to be stored at the
+                 database (or ``None``). This information will be provided to
+                 the :meth:`~.edit_connectivity_service` and :obj:`~.delete`.
+                 **MUST** be JSON/YAML-serializable (plain data structures).
+
+        Raises:
+            WimConnectorException: In case of error.
+        """
+        if service_type == "ELINE":
+            if len(connection_points) > 2:
+                raise WimConnectorError('Connections between more than 2 endpoints are not supported')
+            if len(connection_points) < 2:
+                raise WimConnectorError('Connections must be of at least 2 endpoints')
+            """ First step, create the vpn service """    
+            uuid_l2vpn = str(uuid.uuid4())
+            vpn_service = {}
+            vpn_service["vpn-id"] = uuid_l2vpn
+            vpn_service["vpn-scv-type"] = "vpws"
+            vpn_service["svc-topo"] = "any-to-any"
+            vpn_service["customer-name"] = "osm"
+            vpn_service_list = []
+            vpn_service_list.append(vpn_service)
+            vpn_service_l = {"ietf-l2vpn-svc:vpn-service": vpn_service_list}
+            response_service_creation = None
+            conn_info = []
+            self.logger.info("Sending vpn-service :{}".format(vpn_service_l))
+            try:
+                endpoint_service_creation = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(
+                    self.wim["wim_url"])
+                response_service_creation = requests.post(endpoint_service_creation, headers=self.headers,
+                                                          json=vpn_service_l, auth=self.auth)
+            except requests.exceptions.ConnectionError:
+                raise WimConnectorError("Request to create service Timeout", http_code=408)
+            if response_service_creation.status_code == 409:
+                raise WimConnectorError("Service already exists", http_code=response_service_creation.status_code)
+            elif response_service_creation.status_code != requests.codes.created:
+                raise WimConnectorError("Request to create service not accepted",
+                                        http_code=response_service_creation.status_code)
+            """ Second step, create the connections and vpn attachments """   
+            for connection_point in connection_points:
+                connection_point_wan_info = self.search_mapp(connection_point)
+                site_network_access = {}
+                connection = {}
+                if connection_point["service_endpoint_encapsulation_type"] != "none":
+                    if connection_point["service_endpoint_encapsulation_type"] == "dot1q":
+                        """ The connection is a VLAN """
+                        connection["encapsulation-type"] = "dot1q-vlan-tagged"
+                        tagged = {}
+                        tagged_interf = {}
+                        service_endpoint_encapsulation_info = connection_point["service_endpoint_encapsulation_info"]
+                        if service_endpoint_encapsulation_info["vlan"] is None:
+                            raise WimConnectorError("VLAN must be provided")
+                        tagged_interf["cvlan-id"] = service_endpoint_encapsulation_info["vlan"]
+                        tagged["dot1q-vlan-tagged"] = tagged_interf
+                        connection["tagged-interface"] = tagged
+                    else:
+                        raise NotImplementedError("Encapsulation type not implemented")
+                site_network_access["connection"] = connection
+                self.logger.info("Sending connection:{}".format(connection))
+                vpn_attach = {}
+                vpn_attach["vpn-id"] = uuid_l2vpn
+                vpn_attach["site-role"] = vpn_service["svc-topo"]+"-role"
+                site_network_access["vpn-attachment"] = vpn_attach
+                self.logger.info("Sending vpn-attachement :{}".format(vpn_attach))
+                uuid_sna = str(uuid.uuid4())
+                site_network_access["network-access-id"] = uuid_sna
+                site_network_access["bearer"] = connection_point_wan_info["wan_service_mapping_info"]["bearer"]
+                site_network_accesses = {}
+                site_network_access_list = []
+                site_network_access_list.append(site_network_access)
+                site_network_accesses["ietf-l2vpn-svc:site-network-access"] = site_network_access_list
+                conn_info_d = {}
+                conn_info_d["site"] = connection_point_wan_info["wan_service_mapping_info"]["site-id"]
+                conn_info_d["site-network-access-id"] = site_network_access["network-access-id"]
+                conn_info_d["mapping"] = None
+                conn_info.append(conn_info_d)
+                try:
+                    endpoint_site_network_access_creation = \
+                        "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/sites/site={}/site-network-accesses/".format(
+                            self.wim["wim_url"], connection_point_wan_info["wan_service_mapping_info"]["site-id"])
+                    response_endpoint_site_network_access_creation = requests.post(
+                        endpoint_site_network_access_creation,
+                        headers=self.headers,
+                        json=site_network_accesses,
+                        auth=self.auth)
+                    
+                    if response_endpoint_site_network_access_creation.status_code == 409:
+                        self.delete_connectivity_service(vpn_service["vpn-id"])
+                        raise WimConnectorError("Site_Network_Access with ID '{}' already exists".format(
+                            site_network_access["network-access-id"]),
+                            http_code=response_endpoint_site_network_access_creation.status_code)
+                    
+                    elif response_endpoint_site_network_access_creation.status_code == 400:
+                        self.delete_connectivity_service(vpn_service["vpn-id"])
+                        raise WimConnectorError("Site {} does not exist".format(
+                            connection_point_wan_info["wan_service_mapping_info"]["site-id"]),
+                            http_code=response_endpoint_site_network_access_creation.status_code)
+                    
+                    elif response_endpoint_site_network_access_creation.status_code != requests.codes.created and \
+                            response_endpoint_site_network_access_creation.status_code != requests.codes.no_content:
+                        self.delete_connectivity_service(vpn_service["vpn-id"])
+                        raise WimConnectorError("Request no accepted",
+                                                http_code=response_endpoint_site_network_access_creation.status_code)
+                
+                except requests.exceptions.ConnectionError:
+                    self.delete_connectivity_service(vpn_service["vpn-id"])
+                    raise WimConnectorError("Request Timeout", http_code=408)
+            return uuid_l2vpn, conn_info
+        
+        else:
+            raise NotImplementedError
+
+    def delete_connectivity_service(self, service_uuid, conn_info=None):
+        """Disconnect multi-site endpoints previously connected
+
+        This method should receive as the first argument the UUID generated by
+        the ``create_connectivity_service``
+        """
+        try:
+            self.logger.info("Sending delete")
+            servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services/vpn-service={}/".format(
+                self.wim["wim_url"], service_uuid)
+            response = requests.delete(servicepoint, auth=self.auth)
+            if response.status_code != requests.codes.no_content:
+                raise WimConnectorError("Error in the request", http_code=response.status_code)
+        except requests.exceptions.ConnectionError:
+            raise WimConnectorError("Request Timeout", http_code=408)
+
+    def edit_connectivity_service(self, service_uuid, conn_info=None,
+                                  connection_points=None, **kwargs):
+        """Change an existing connectivity service, see
+        ``create_connectivity_service``"""
+
+        # sites = {"sites": {}}
+        # site_list = []
+        vpn_service = {}
+        vpn_service["svc-topo"] = "any-to-any"
+        counter = 0
+        for connection_point in connection_points:
+            site_network_access = {}
+            connection_point_wan_info = self.search_mapp(connection_point)
+            params_site = {}
+            params_site["site-id"] = connection_point_wan_info["wan_service_mapping_info"]["site-id"]
+            params_site["site-vpn-flavor"] = "site-vpn-flavor-single"
+            device_site = {}
+            device_site["device-id"] = connection_point_wan_info["device-id"]
+            params_site["devices"] = device_site
+            # network_access = {}
+            connection = {}
+            if connection_point["service_endpoint_encapsulation_type"] != "none":
+                if connection_point["service_endpoint_encapsulation_type"] == "dot1q":
+                    """ The connection is a VLAN """
+                    connection["encapsulation-type"] = "dot1q-vlan-tagged"
+                    tagged = {}
+                    tagged_interf = {}
+                    service_endpoint_encapsulation_info = connection_point["service_endpoint_encapsulation_info"]
+                    if service_endpoint_encapsulation_info["vlan"] is None:
+                        raise WimConnectorError("VLAN must be provided")
+                    tagged_interf["cvlan-id"] = service_endpoint_encapsulation_info["vlan"]
+                    tagged["dot1q-vlan-tagged"] = tagged_interf
+                    connection["tagged-interface"] = tagged
+                else:
+                    raise NotImplementedError("Encapsulation type not implemented")
+            site_network_access["connection"] = connection
+            vpn_attach = {}
+            vpn_attach["vpn-id"] = service_uuid
+            vpn_attach["site-role"] = vpn_service["svc-topo"]+"-role"
+            site_network_access["vpn-attachment"] = vpn_attach
+            uuid_sna = conn_info[counter]["site-network-access-id"]
+            site_network_access["network-access-id"] = uuid_sna
+            site_network_access["bearer"] = connection_point_wan_info["wan_service_mapping_info"]["bearer"]
+            site_network_accesses = {}
+            site_network_access_list = []
+            site_network_access_list.append(site_network_access)
+            site_network_accesses["ietf-l2vpn-svc:site-network-access"] = site_network_access_list
+            try:
+                endpoint_site_network_access_edit = \
+                    "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/sites/site={}/site-network-accesses/".format(
+                        self.wim["wim_url"], connection_point_wan_info["wan_service_mapping_info"]["site-id"])
+                response_endpoint_site_network_access_creation = requests.put(endpoint_site_network_access_edit,
+                                                                              headers=self.headers,
+                                                                              json=site_network_accesses,
+                                                                              auth=self.auth)
+                if response_endpoint_site_network_access_creation.status_code == 400:
+                    raise WimConnectorError("Service does not exist",
+                                            http_code=response_endpoint_site_network_access_creation.status_code)
+                elif response_endpoint_site_network_access_creation.status_code != 201 and \
+                        response_endpoint_site_network_access_creation.status_code != 204:
+                    raise WimConnectorError("Request no accepted",
+                                            http_code=response_endpoint_site_network_access_creation.status_code)
+            except requests.exceptions.ConnectionError:
+                raise WimConnectorError("Request Timeout", http_code=408)
+            counter += 1
+        return None
+
+    def clear_all_connectivity_services(self):
+        """Delete all WAN Links corresponding to a WIM"""
+        try:
+            self.logger.info("Sending clear all connectivity services")
+            servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(self.wim["wim_url"])
+            response = requests.delete(servicepoint, auth=self.auth)
+            if response.status_code != requests.codes.no_content:
+                raise WimConnectorError("Unable to clear all connectivity services", http_code=response.status_code)
+        except requests.exceptions.ConnectionError:
+            raise WimConnectorError("Request Timeout", http_code=408)
+
+    def get_all_active_connectivity_services(self):
+        """Provide information about all active connections provisioned by a
+        WIM
+        """
+        try:
+            self.logger.info("Sending get all connectivity services")
+            servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(self.wim["wim_url"])
+            response = requests.get(servicepoint, auth=self.auth)
+            if response.status_code != requests.codes.ok:
+                raise WimConnectorError("Unable to get all connectivity services", http_code=response.status_code)
+            return response
+        except requests.exceptions.ConnectionError:
+            raise WimConnectorError("Request Timeout", http_code=408)
diff --git a/RO/osm_ro/wim/wimconn_odl.py b/RO/osm_ro/wim/wimconn_odl.py
new file mode 100644 (file)
index 0000000..2371046
--- /dev/null
@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+from .wimconn import WimConnector
+
+
+# TODO: Basically create this file
+
+class OdlConnector(WimConnector):
+    # Placeholder OpenDaylight WIM connector: every operation raises
+    # NotImplementedError until a real implementation is provided.
+    def get_connectivity_service_status(self, link_uuid):
+        raise NotImplementedError
+
+    def create_connectivity_service(self, *args, **kwargs):
+        raise NotImplementedError
+
+    def delete_connectivity_service(self, link_uuid):
+        raise NotImplementedError
diff --git a/RO/requirements.txt b/RO/requirements.txt
new file mode 100644 (file)
index 0000000..731b505
--- /dev/null
@@ -0,0 +1,9 @@
+PyYAML
+bottle
+MySQL-python
+jsonschema
+paramiko
+requests==2.18.*
+netaddr
+logutils
+git+https://osm.etsi.org/gerrit/osm/IM.git#egg=osm-im
diff --git a/RO/setup.py b/RO/setup.py
new file mode 100755 (executable)
index 0000000..5cb1fe9
--- /dev/null
@@ -0,0 +1,76 @@
+#!/usr/bin/env python3
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# from distutils.core import setup
+# from distutils.command.install_data import install_data
+from setuptools import setup
+from os import system
+# import glob
+
+# Package metadata for the osm_ro distribution
+_name = 'osm_ro'
+_description = 'OSM Resource Orchestrator'
+_author = 'ETSI OSM'
+_author_email = 'alfonso.tiernosepulveda@telefonica.com'
+_maintainer = 'garciadeblas'
+_maintainer_email = 'gerardo.garciadeblas@telefonica.com'
+_license = 'Apache 2.0'
+_url = 'https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary'
+# Runtime dependencies; the Debian package dependencies live in stdeb.cfg
+_requirements = [
+    # TODO py3 revise
+    "osm-im",
+    "PyYAML",
+    "bottle",
+    "logutils",
+    "jsonschema",
+    "paramiko",
+    "mysqlclient",
+    #"MySQLdb",
+
+    # common to  VIMS
+    "requests",
+    "netaddr",  # openstack, aws, vmware
+]
+
+setup(
+    name=_name,
+    # Version is derived from "git describe" by setuptools-version-command
+    version_command=('git -C .. describe --match v* --tags --long --dirty', 'pep440-git-full'),
+    description = _description,
+    # NOTE(review): requires README.rst to exist next to this setup.py at
+    # build time -- confirm it is shipped in this subdirectory
+    long_description = open('README.rst').read(),
+    author = _author,
+    author_email = _author_email,
+    maintainer = _maintainer,
+    maintainer_email = _maintainer_email,
+    url = _url,
+    license = _license,
+    packages = [_name],
+    #packages = ['osm_ro', 'osm_roclient'],
+    package_dir = {_name: _name},
+    # package_data = {_name: ['vnfs/*.yaml', 'vnfs/examples/*.yaml',
+    #                    'scenarios/*.yaml', 'scenarios/examples/*.yaml',
+    #                    'instance-scenarios/examples/*.yaml', 'database_utils/*',
+    #                    'scripts/*']},
+    # data_files = [('/etc/osm/', ['osm_ro/openmanod.cfg']),
+    #              ('/etc/systemd/system/', ['osm_ro/osm-ro.service']),
+    #              ],
+    scripts=['osm_ro/scripts/RO-start.sh'
+      #'openmanod', 'openmano', 'osm_ro/scripts/service-openmano', 'osm_ro/scripts/openmano-report',
+      ],
+    dependency_links=["git+https://osm.etsi.org/gerrit/osm/IM.git#egg=osm-im"],
+    install_requires=_requirements,
+    include_package_data=True,
+    setup_requires=['setuptools-version-command'],
+    #test_suite='nose.collector',
+)
+
diff --git a/RO/stdeb.cfg b/RO/stdeb.cfg
new file mode 100644 (file)
index 0000000..6260296
--- /dev/null
@@ -0,0 +1,23 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+[DEFAULT]
+X-Python3-Version : >= 3.5
+Maintainer: Gerardo Garcia <gerardo.garciadeblas@telefonica.com>
+Depends3 : python3-bottle, python3-jsonschema, python3-mysqldb, python3-paramiko, python3-yaml,
+           libmysqlclient-dev, mysql-client,
+           python3-requests, python3-netaddr,
+           python3-osm-im,
+
+# TODO py3   libssl-dev, libffi-dev, python-logutils, python-lib-osm-openvim,
+# TODO py3   python3-networkx
diff --git a/RO/test/RO_tests/afiinity_vnf/scenario_simple_2_vnf_afinnity.yaml b/RO/test/RO_tests/afiinity_vnf/scenario_simple_2_vnf_afinnity.yaml
new file mode 100644 (file)
index 0000000..22e372f
--- /dev/null
@@ -0,0 +1,38 @@
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version:  2
+scenario:
+  name:          simple_ha
+  description:   Simple network scenario consisting of two VNF connected to an external network
+  vnfs:
+    linux1:                   # vnf/net name in the scenario
+      vnf_name:  linux_test_2vms # VNF name as introduced in OPENMANO DB
+  networks:
+    mgmt:                   # provide a name for this net or connection
+      external:  true
+      interfaces:
+      - linux1:  control0       # Node and its interface
+      - linux1:  control1       # Node and its interface
+
+
+
+
diff --git a/RO/test/RO_tests/afiinity_vnf/vnfd_linux_2_vnfc_affinity.yaml b/RO/test/RO_tests/afiinity_vnf/vnfd_linux_2_vnfc_affinity.yaml
new file mode 100644 (file)
index 0000000..53c7770
--- /dev/null
@@ -0,0 +1,61 @@
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+
+vnf:
+    name:        linux_test_2vms
+    description: Single-VM VNF with a traditional cloud VM based on generic Linux OS
+    external-connections:
+    -   name:              control0
+        type:              mgmt              # "mgmt" (autoconnect to management net), "bridge", "data"
+        VNFC:              linux-VM-HA-A  # Virtual Machine this interface belongs to
+        local_iface_name:  eth0             # interface name inside this Virtual Machine (must be defined in the VNFC section)
+        description:       Management interface 0
+    -   name:              control1
+        type:              mgmt              # "mgmt" (autoconnect to management net), "bridge", "data"
+        VNFC:              linux-VM-HA-B  # Virtual Machine this interface belongs to
+        local_iface_name:  eth0             # interface name inside this Virtual Machine (must be defined in the VNFC section)
+        description:       Management interface 1
+    VNFC:
+    -   name:        linux-VM-HA-A
+        description: Generic Linux Virtual Machine
+        availability_zone: A  # availability zone A
+        #Copy the image to a compute path and edit this path
+        image name:  TestVM
+        vcpus: 1          # Only for traditional cloud VMs. Number of virtual CPUs (oversubscription is allowed).
+        ram: 1024         # Only for traditional cloud VMs. Memory in MBytes (not from hugepages, oversubscription is allowed)
+        disk: 10
+        bridge-ifaces:
+        -   name:      eth0
+            vpci:      "0000:00:11.0"
+        numas: []
+    -   name:        linux-VM-HA-B
+        description: Generic Linux Virtual Machine
+        availability_zone: B # availability zone B
+        #Copy the image to a compute path and edit this path
+        image name:  TestVM
+        vcpus: 1          # Only for traditional cloud VMs. Number of virtual CPUs (oversubscription is allowed).
+        ram: 1024         # Only for traditional cloud VMs. Memory in MBytes (not from hugepages, oversubscription is allowed)
+        disk: 10
+        bridge-ifaces:
+        -   name:      eth0
+            vpci:      "0000:00:12.0"
+        numas: []
diff --git a/RO/test/RO_tests/empy_volume/scenario_additional_disk_empty_volume.yaml b/RO/test/RO_tests/empy_volume/scenario_additional_disk_empty_volume.yaml
new file mode 100644 (file)
index 0000000..c0b541c
--- /dev/null
@@ -0,0 +1,40 @@
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version:  2
+scenario:
+  name:          vnf_additional_disk_empty_volume
+  description:   Just deploy vnf_2_disks
+  public:        false      # if available for other tenants
+  vnfs:
+    vnf_2_disks:                     # vnf name in the scenario
+      #identify an already openmano uploaded VNF either by vnf_id (uuid, preferred) or vnf_name
+      #vnf_id:    0c0dcc20-c5d5-11e6-a9fb-fa163e2ae06e                  #preferred id method
+      vnf_name:  vnf_additional_disk_empty_volume   #can fail if several vnfs match this name
+      #graph:     {"y":399,"x":332,"ifaces":{"left":[["xe0","d"],["xe1","d"]],"bottom":[["eth0","v"],["eth1","m"]]}}
+  networks:                
+    mgmt:
+      # Connections based on external networks (datacenter nets) must include the external network in the list of nodes
+      type:      bridge
+      external:  true       #this will be connected outside
+      interfaces:
+      -   vnf_2_disks:  mgmt0
+
diff --git a/RO/test/RO_tests/empy_volume/vnfd_additional_disk_empty_volume.yaml b/RO/test/RO_tests/empy_volume/vnfd_additional_disk_empty_volume.yaml
new file mode 100644 (file)
index 0000000..7a6f5c2
--- /dev/null
@@ -0,0 +1,63 @@
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+    name: vnf_additional_disk_empty_volume
+    description: VNF with additional volume based on image
+    # class: parent      # Optional. Used to organize VNFs
+    external-connections:
+    -   name:              mgmt0
+        type:              mgmt        # "mgmt" (autoconnect to management net), "bridge", "data"
+        VNFC:              TEMPLATE-VM # Virtual Machine this interface belongs to
+        local_iface_name:  mgmt0       # interface name inside this Virtual Machine (must be defined in the VNFC section)
+        description:       Management interface
+    VNFC:                              # Virtual machine array 
+    -   name:        TEMPLATE-VM       # name of Virtual Machine
+        description: TEMPLATE description
+        image name: ubuntu16.04
+        # image metadata: {"bus":"ide", "os_type":"windows", "use_incremental": "no" } #Optional
+        # processor:                     #Optional
+        #     model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz
+        #     features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"]
+        # hypervisor:                    #Optional
+        #     type: QEMU-kvm
+        #     version: "10002|12001|2.6.32-358.el6.x86_64"
+        vcpus: 1          # Only for traditional cloud VMs. Number of virtual CPUs (oversubscription is allowed).
+        ram: 1000         # Only for traditional cloud VMs. Memory in MBytes (not from hugepages, oversubscription is allowed)
+        disk: 5          # disk size in GiB, by default 1
+        #numas: 
+        #-   paired-threads: 5          # "cores", "paired-threads", "threads"
+        #    paired-threads-id: [ [0,1], [2,3], [4,5], [6,7], [8,9] ] # By default follows incremental order
+        #    memory: 14                 # GBytes
+        #    interfaces: []
+        bridge-ifaces:
+        -   name:      mgmt0
+            vpci:      "0000:00:0a.0"    # Optional. Virtual PCI address
+            bandwidth: 1 Mbps            # Optional. Informative only
+            # mac_address: '20:33:45:56:77:46' #avoid this option if possible
+            # model:       'virtio'      # ("virtio","e1000","ne2k_pci","pcnet","rtl8139") By default, it is automatically filled by libvirt
+        devices:                       # Optional, order determines device letter assignment (hda, hdb, ...)
+        -   type:      disk            # "disk","cdrom","xml"
+            size: 1
+            # image metadata: {"bus":"ide", "os_type":"windows", "use_incremental": "no" }
+            # vpci:      "0000:00:03.0"   # Optional, not for disk or cdrom
+    # Additional Virtual Machines would be included here
+
diff --git a/RO/test/RO_tests/floating_ip/scenario_floating_ip.yaml b/RO/test/RO_tests/floating_ip/scenario_floating_ip.yaml
new file mode 100644 (file)
index 0000000..dcfb239
--- /dev/null
@@ -0,0 +1,40 @@
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version:  2
+scenario:
+  name:          vnf_floating_ip
+  description:   vnf_floating_ip
+  public:        false      # if available for other tenants
+  vnfs:
+    vnf_floating_ip:                     # vnf name in the scenario
+      #identify an already openmano uploaded VNF either by vnf_id (uuid, preferred) or vnf_name
+      #vnf_id:    0c0dcc20-c5d5-11e6-a9fb-fa163e2ae06e                  #preferred id method
+      vnf_name:  vnf_floating_ip   #can fail if several vnfs match this name
+      #graph:     {"y":399,"x":332,"ifaces":{"left":[["xe0","d"],["xe1","d"]],"bottom":[["eth0","v"],["eth1","m"]]}}
+  networks:                
+    mgmt:
+      # Connections based on external networks (datacenter nets) must include the external network in the list of nodes
+      type:      bridge
+      external:  true       #this will be connected outside
+      interfaces:
+      -   vnf_floating_ip:  mgmt0
+
diff --git a/RO/test/RO_tests/floating_ip/vnfd_floating_ip.yaml b/RO/test/RO_tests/floating_ip/vnfd_floating_ip.yaml
new file mode 100644 (file)
index 0000000..0d305a9
--- /dev/null
@@ -0,0 +1,59 @@
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+    name: vnf_floating_ip
+    description: VNF disabling port_security option in mgmt interface 
+    # class: parent      # Optional. Used to organize VNFs
+    external-connections:
+    -   name:              mgmt0
+        type:              mgmt        # "mgmt" (autoconnect to management net), "bridge", "data"
+        VNFC:              vnf_floating_ip # Virtual Machine this interface belongs to
+        local_iface_name:  mgmt0       # interface name inside this Virtual Machine (must be defined in the VNFC section)
+        description:       Management interface
+    VNFC:                              # Virtual machine array 
+    -   name:        vnf_floating_ip       # name of Virtual Machine
+        description: vnf_floating_ip
+        image name: ubuntu16.04
+        # image metadata: {"bus":"ide", "os_type":"windows", "use_incremental": "no" } #Optional
+        # processor:                     #Optional
+        #     model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz
+        #     features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"]
+        # hypervisor:                    #Optional
+        #     type: QEMU-kvm
+        #     version: "10002|12001|2.6.32-358.el6.x86_64"
+        vcpus: 1          # Only for traditional cloud VMs. Number of virtual CPUs (oversubscription is allowed).
+        ram: 1000         # Only for traditional cloud VMs. Memory in MBytes (not from hugepages, oversubscription is allowed)
+        disk: 5          # disk size in GiB, by default 1
+        #numas: 
+        #-   paired-threads: 5          # "cores", "paired-threads", "threads"
+        #    paired-threads-id: [ [0,1], [2,3], [4,5], [6,7], [8,9] ] # By default follows incremental order
+        #    memory: 14                 # GBytes
+        #    interfaces: []
+        bridge-ifaces:
+        -   name:      mgmt0
+            vpci:      "0000:00:0a.0"    # Optional. Virtual PCI address
+            bandwidth: 1 Mbps            # Optional. Informative only
+            floating-ip: True
+            # mac_address: '20:33:45:56:77:46' #avoid this option if possible
+            # model:       'virtio'      # ("virtio","e1000","ne2k_pci","pcnet","rtl8139") By default, it is automatically filled by libvirt
+    # Additional Virtual Machines would be included here
+
diff --git a/RO/test/RO_tests/image_based_volume/scenario_additional_disk_based_image.yaml b/RO/test/RO_tests/image_based_volume/scenario_additional_disk_based_image.yaml
new file mode 100644 (file)
index 0000000..34ffeb2
--- /dev/null
@@ -0,0 +1,41 @@
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+##
+---
+nsd:nsd-catalog:
+    nsd:
+    -   id:          test_2vdu_nsd
+        name:        additional_disk_based_image
+        short-name:  2disks
+        description: Just deploy vnf_2_disks
+        vendor:      OSM
+        version:     '1.0'
+        constituent-vnfd:
+        -   member-vnf-index: vnf2disks
+            vnfd-id-ref: additional_disk_based_image
+        vld:
+        # Networks for the VNFs
+        -   id:         vld1
+            name:       mgmt
+            short-name: vld1-sname
+            type:       ELAN
+            mgmt-network: 'true'
+            vnfd-connection-point-ref:
+            -   member-vnf-index-ref: vnf2disks
+                vnfd-id-ref: additional_disk_based_image
+                vnfd-connection-point-ref: mgmt0
diff --git a/RO/test/RO_tests/image_based_volume/vnfd_additional_disk_based_image.yaml b/RO/test/RO_tests/image_based_volume/vnfd_additional_disk_based_image.yaml
new file mode 100644 (file)
index 0000000..30aafac
--- /dev/null
@@ -0,0 +1,58 @@
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+##
+---
+vnfd-catalog:
+    vnfd:
+    -   connection-point:
+        -   name: mgmt0
+            type: VPORT
+        name: vnf_additional_disk_based_image
+        description: VNF with additional volume based on image
+        id: additional_disk_based_image
+        # short-name: 2disks
+        vendor: ROtest
+        version: '1.0'
+        mgmt-interface:
+            cp: mgmt0
+        vdu:
+        -   id: VM1
+            name: VM1-name
+            image: US1604
+            alternative-images:
+            -   vim-type: openstack
+                image: cirros
+            -   vim-type: openvim
+                image: cirros034
+            volumes:
+            -   name: vdb
+                device-type: disk
+                image: cirros034
+                # image-checksum: 4a293322f18827af81a9450e3792947c
+                size: 8
+            interface:
+            -   name: iface11
+                type: EXTERNAL
+                virtual-interface:
+                    type: VIRTIO
+                external-connection-point-ref: mgmt0
+                mac-address:   "52:33:44:55:66:77"
+            vm-flavor:
+                memory-mb: '2048'
+                storage-gb: '8'
+                vcpu-count: '1'
diff --git a/RO/test/RO_tests/no_port_security/scenario_vnf_no_port_security.yaml b/RO/test/RO_tests/no_port_security/scenario_vnf_no_port_security.yaml
new file mode 100644 (file)
index 0000000..1f2e41b
--- /dev/null
@@ -0,0 +1,40 @@
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version:  2
+scenario:
+  name:          vnf_no_port_security
+  description:   vnf_no_port_security
+  public:        false      # if available for other tenants
+  vnfs:
+    vnf_no_port_security:                     # vnf name in the scenario
+      #identify an already openmano uploaded VNF either by vnf_id (uuid, preferred) or vnf_name
+      #vnf_id:    0c0dcc20-c5d5-11e6-a9fb-fa163e2ae06e                  #preferred id method
+      vnf_name:  vnf_no_port_security   #can fail if several vnfs match this name
+      #graph:     {"y":399,"x":332,"ifaces":{"left":[["xe0","d"],["xe1","d"]],"bottom":[["eth0","v"],["eth1","m"]]}}
+  networks:                
+    mgmt:
+      # Connections based on external networks (datacenter nets) must include the external network in the list of nodes
+      type:      bridge
+      external:  true       #this will be connected outside
+      interfaces:
+      -   vnf_no_port_security:  mgmt0
+
diff --git a/RO/test/RO_tests/no_port_security/vnfd_no_port_security.yaml b/RO/test/RO_tests/no_port_security/vnfd_no_port_security.yaml
new file mode 100644 (file)
index 0000000..11874ed
--- /dev/null
@@ -0,0 +1,59 @@
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+    name: vnf_no_port_security
+    description: VNF disabling port_security option in mgmt interface 
+    # class: parent      # Optional. Used to organize VNFs
+    external-connections:
+    -   name:              mgmt0
+        type:              mgmt        # "mgmt" (autoconnect to management net), "bridge", "data"
+        VNFC:              vnf_no_port_security # Virtual Machine this interface belongs to
+        local_iface_name:  mgmt0       # interface name inside this Virtual Machine (must be defined in the VNFC section)
+        description:       Management interface
+    VNFC:                              # Virtual machine array 
+    -   name:        vnf_no_port_security       # name of Virtual Machine
+        description: vnf_no_port_security
+        image name: ubuntu16.04
+        # image metadata: {"bus":"ide", "os_type":"windows", "use_incremental": "no" } #Optional
+        # processor:                     #Optional
+        #     model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz
+        #     features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"]
+        # hypervisor:                    #Optional
+        #     type: QEMU-kvm
+        #     version: "10002|12001|2.6.32-358.el6.x86_64"
+        vcpus: 1          # Only for traditional cloud VMs. Number of virtual CPUs (oversubscription is allowed).
+        ram: 1000         # Only for traditional cloud VMs. Memory in MBytes (not from hugepages, oversubscription is allowed)
+        disk: 5          # disk size in GiB, by default 1
+        #numas: 
+        #-   paired-threads: 5          # "cores", "paired-threads", "threads"
+        #    paired-threads-id: [ [0,1], [2,3], [4,5], [6,7], [8,9] ] # By default follows incremental order
+        #    memory: 14                 # GBytes
+        #    interfaces: []
+        bridge-ifaces:
+        -   name:      mgmt0
+            vpci:      "0000:00:0a.0"    # Optional. Virtual PCI address
+            bandwidth: 1 Mbps            # Optional. Informative only
+            port-security: False
+            # mac_address: '20:33:45:56:77:46' #avoid this option if possible
+            # model:       'virtio'      # ("virtio","e1000","ne2k_pci","pcnet","rtl8139") By default, it is automatically filled by libvirt
+    # Additional Virtual Machines would be included here
+
diff --git a/RO/test/RO_tests/passthrough/scenario_p2p_passthrough.yaml b/RO/test/RO_tests/passthrough/scenario_p2p_passthrough.yaml
new file mode 100644 (file)
index 0000000..4dfd3c5
--- /dev/null
@@ -0,0 +1,41 @@
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version:  2
+scenario:
+  name:          p2p_passthrough
+  description:   Network scenario consisting of two machines with a sr-iov interconnected between them
+  vnfs: 
+    passthrough1:                   # vnf/net name in the scenario
+      vnf_name:  passthrough        # VNF name as introduced in OPENMANO DB
+    passthrough2:                   # vnf/net name in the scenario
+      vnf_name:  passthrough        # VNF name as introduced in OPENMANO DB
+  networks: 
+    mgmt:                   # provide a name for this net or connection
+      external:  true
+      interfaces: 
+      - passthrough1:  eth0       # Node and its interface
+      - passthrough2:  eth0       # Node and its interface
+    dataplane:                   # provide a name for this net or connection
+      interfaces: 
+      - passthrough1:  xe0       # Node and its interface
+      - passthrough2:  xe0       # Node and its interface
+
diff --git a/RO/test/RO_tests/passthrough/vnfd_1passthrough.yaml b/RO/test/RO_tests/passthrough/vnfd_1passthrough.yaml
new file mode 100644 (file)
index 0000000..ab24adf
--- /dev/null
@@ -0,0 +1,53 @@
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+    name:        passthrough
+    description: Machine with EPA and a SR-IOV interface
+    external-connections:
+    -   name:              eth0
+        type:              bridge
+        VNFC:              passthrough-VM
+        local_iface_name:  eth0
+        description:       management interface
+    -   name:              xe0
+        type:              data
+        VNFC:              passthrough-VM
+        local_iface_name:  xe0
+        description:       Dataplane interface
+    VNFC:
+    -   name:        passthrough-VM
+        description: Machine with EPA and a SR-IOV interface
+        image name:  centos
+        disk: 20
+        numas: 
+        -   threads: 1          # "cores", "paired-threads", "threads"
+            memory: 1                 # GBytes
+            interfaces:
+            -   name:      xe0
+                vpci:      "0000:00:11.0"
+                dedicated: "yes"         # "yes"(passthrough), "no"(sriov with vlan tags), "yes:sriov"(sriov, but exclusive and without vlan tag)
+                bandwidth: 1 Gbps
+
+        bridge-ifaces:
+        -   name:      eth0
+            vpci:      "0000:00:0a.0"
+
diff --git a/RO/test/RO_tests/pmp_passthrough/scenario_pmp_passthrough.yaml b/RO/test/RO_tests/pmp_passthrough/scenario_pmp_passthrough.yaml
new file mode 100644 (file)
index 0000000..d243c0e
--- /dev/null
@@ -0,0 +1,50 @@
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version:  2
+scenario:
+  name:          p2p_passthrough
+  description:   Network scenario consisting of 4 machines with a passthrough interconnected between them
+  vnfs: 
+    passthrough1:                   # vnf/net name in the scenario
+      vnf_name:  passthrough        # VNF name as introduced in OPENMANO DB
+    passthrough2:                   # vnf/net name in the scenario
+      vnf_name:  passthrough        # VNF name as introduced in OPENMANO DB
+    passthrough3:                   # vnf/net name in the scenario
+      vnf_name:  passthrough        # VNF name as introduced in OPENMANO DB
+    passthrough4:                   # vnf/net name in the scenario
+      vnf_name:  passthrough        # VNF name as introduced in OPENMANO DB
+
+  networks: 
+    mgmt:                   # provide a name for this net or connection
+      external:  true
+      interfaces: 
+      - passthrough1:  eth0       # Node and its interface
+      - passthrough2:  eth0       # Node and its interface
+      - passthrough3:  eth0       # Node and its interface
+      - passthrough4:  eth0       # Node and its interface
+    dataplane:                   # provide a name for this net or connection
+      interfaces: 
+      - passthrough1:  xe0       # Node and its interface
+      - passthrough2:  xe0       # Node and its interface
+      - passthrough3:  xe0       # Node and its interface
+      - passthrough4:  xe0       # Node and its interface
+
diff --git a/RO/test/RO_tests/pmp_passthrough/vnfd_1passthrough.yaml b/RO/test/RO_tests/pmp_passthrough/vnfd_1passthrough.yaml
new file mode 100644 (file)
index 0000000..ab24adf
--- /dev/null
@@ -0,0 +1,53 @@
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+    name:        passthrough
+    description: Machine with EPA and a SR-IOV interface
+    external-connections:
+    -   name:              eth0
+        type:              bridge
+        VNFC:              passthrough-VM
+        local_iface_name:  eth0
+        description:       management interface
+    -   name:              xe0
+        type:              data
+        VNFC:              passthrough-VM
+        local_iface_name:  xe0
+        description:       Dataplane interface
+    VNFC:
+    -   name:        passthrough-VM
+        description: Machine with EPA and a SR-IOV interface
+        image name:  centos
+        disk: 20
+        numas: 
+        -   threads: 1          # "cores", "paired-threads", "threads"
+            memory: 1                 # GBytes
+            interfaces:
+            -   name:      xe0
+                vpci:      "0000:00:11.0"
+                dedicated: "yes"         # "yes"(passthrough), "no"(sriov with vlan tags), "yes:sriov"(sriov, but exclusive and without vlan tag)
+                bandwidth: 1 Gbps
+
+        bridge-ifaces:
+        -   name:      eth0
+            vpci:      "0000:00:0a.0"
+
diff --git a/RO/test/RO_tests/pmp_sriov/scenario_pmp_sriov.yaml b/RO/test/RO_tests/pmp_sriov/scenario_pmp_sriov.yaml
new file mode 100644 (file)
index 0000000..9e24552
--- /dev/null
@@ -0,0 +1,50 @@
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version:  2
+scenario:
+  name:          p2p_sriov
+  description:   Network scenario consisting of four machines with a sr-iov interconnected between them
+  vnfs: 
+    sriov1:                   # vnf/net name in the scenario
+      vnf_name:  sriov        # VNF name as introduced in OPENMANO DB
+    sriov2:                   # vnf/net name in the scenario
+      vnf_name:  sriov        # VNF name as introduced in OPENMANO DB
+    sriov3:                   # vnf/net name in the scenario
+      vnf_name:  sriov        # VNF name as introduced in OPENMANO DB
+    sriov4:                   # vnf/net name in the scenario
+      vnf_name:  sriov        # VNF name as introduced in OPENMANO DB
+
+  networks: 
+    mgmt:                   # provide a name for this net or connection
+      external:  true
+      interfaces: 
+      - sriov1:  eth0       # Node and its interface
+      - sriov2:  eth0       # Node and its interface
+      - sriov3:  eth0       # Node and its interface
+      - sriov4:  eth0       # Node and its interface
+    dataplane:                   # provide a name for this net or connection
+      interfaces: 
+      - sriov1:  xe0       # Node and its interface
+      - sriov2:  xe0       # Node and its interface
+      - sriov3:  xe0       # Node and its interface
+      - sriov4:  xe0       # Node and its interface
+
diff --git a/RO/test/RO_tests/pmp_sriov/vnfd_1sriov.yaml b/RO/test/RO_tests/pmp_sriov/vnfd_1sriov.yaml
new file mode 100644 (file)
index 0000000..2d4ad5b
--- /dev/null
@@ -0,0 +1,53 @@
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+    name:        sriov
+    description: Machine with EPA and a SR-IOV interface
+    external-connections:
+    -   name:              eth0
+        type:              bridge
+        VNFC:              sriov-VM
+        local_iface_name:  eth0
+        description:       management interface
+    -   name:              xe0
+        type:              data
+        VNFC:              sriov-VM
+        local_iface_name:  xe0
+        description:       Dataplane interface
+    VNFC:
+    -   name:        sriov-VM
+        description: Machine with EPA and a SR-IOV interface
+        image name:  centos
+        disk: 20
+        numas: 
+        -   threads: 1          # "cores", "paired-threads", "threads"
+            memory: 1                 # GBytes
+            interfaces:
+            -   name:      xe0
+                vpci:      "0000:00:11.0"
+                dedicated: "no"         # "yes"(passthrough), "no"(sriov with vlan tags), "yes:sriov"(sriov, but exclusive and without vlan tag)
+                bandwidth: 1 Gbps
+
+        bridge-ifaces:
+        -   name:      eth0
+            vpci:      "0000:00:0a.0"
+
diff --git a/RO/test/RO_tests/pmp_sriov_passthrough/scenario_pmp_sriov_passthrough.yaml b/RO/test/RO_tests/pmp_sriov_passthrough/scenario_pmp_sriov_passthrough.yaml
new file mode 100644 (file)
index 0000000..322c094
--- /dev/null
@@ -0,0 +1,50 @@
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version:  2
+scenario:
+  name:          pmp_sriov_passthrough
+  description:   Network scenario consisting of four machines with SR-IOV and passthrough interfaces interconnected between them
+  vnfs: 
+    sriov1:                   # vnf/net name in the scenario
+      vnf_name:  sriov        # VNF name as introduced in OPENMANO DB
+    passthrough1:                   # vnf/net name in the scenario
+      vnf_name:  passthrough        # VNF name as introduced in OPENMANO DB
+    sriov2:                   # vnf/net name in the scenario
+      vnf_name:  sriov        # VNF name as introduced in OPENMANO DB
+    passthrough2:                   # vnf/net name in the scenario
+      vnf_name:  passthrough        # VNF name as introduced in OPENMANO DB
+
+  networks: 
+    mgmt:                   # provide a name for this net or connection
+      external:  true
+      interfaces: 
+      - sriov1:  eth0       # Node and its interface
+      - passthrough1:  eth0       # Node and its interface
+      - sriov2:  eth0       # Node and its interface
+      - passthrough2:  eth0       # Node and its interface
+    dataplane:                   # provide a name for this net or connection
+      interfaces: 
+      - sriov1:  xe0       # Node and its interface
+      - passthrough1:  xe0       # Node and its interface
+      - sriov2:  xe0       # Node and its interface
+      - passthrough2:  xe0       # Node and its interface
+
diff --git a/RO/test/RO_tests/pmp_sriov_passthrough/vnfd_1passthrough.yaml b/RO/test/RO_tests/pmp_sriov_passthrough/vnfd_1passthrough.yaml
new file mode 100644 (file)
index 0000000..ab24adf
--- /dev/null
@@ -0,0 +1,53 @@
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+    name:        passthrough
+    description: Machine with EPA and a passthrough interface
+    external-connections:
+    -   name:              eth0
+        type:              bridge
+        VNFC:              passthrough-VM
+        local_iface_name:  eth0
+        description:       management interface
+    -   name:              xe0
+        type:              data
+        VNFC:              passthrough-VM
+        local_iface_name:  xe0
+        description:       Dataplane interface
+    VNFC:
+    -   name:        passthrough-VM
+        description: Machine with EPA and a passthrough interface
+        image name:  centos
+        disk: 20
+        numas: 
+        -   threads: 1          # "cores", "paired-threads", "threads"
+            memory: 1                 # GBytes
+            interfaces:
+            -   name:      xe0
+                vpci:      "0000:00:11.0"
+                dedicated: "yes"         # "yes"(passthrough), "no"(sriov with vlan tags), "yes:sriov"(sriov, but exclusive and without vlan tag)
+                bandwidth: 1 Gbps
+
+        bridge-ifaces:
+        -   name:      eth0
+            vpci:      "0000:00:0a.0"
+
diff --git a/RO/test/RO_tests/pmp_sriov_passthrough/vnfd_1sriov.yaml b/RO/test/RO_tests/pmp_sriov_passthrough/vnfd_1sriov.yaml
new file mode 100644 (file)
index 0000000..2d4ad5b
--- /dev/null
@@ -0,0 +1,53 @@
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+    name:        sriov
+    description: Machine with EPA and a SR-IOV interface
+    external-connections:
+    -   name:              eth0
+        type:              bridge
+        VNFC:              sriov-VM
+        local_iface_name:  eth0
+        description:       management interface
+    -   name:              xe0
+        type:              data
+        VNFC:              sriov-VM
+        local_iface_name:  xe0
+        description:       Dataplane interface
+    VNFC:
+    -   name:        sriov-VM
+        description: Machine with EPA and a SR-IOV interface
+        image name:  centos
+        disk: 20
+        numas: 
+        -   threads: 1          # "cores", "paired-threads", "threads"
+            memory: 1                 # GBytes
+            interfaces:
+            -   name:      xe0
+                vpci:      "0000:00:11.0"
+                dedicated: "no"         # "yes"(passthrough), "no"(sriov with vlan tags), "yes:sriov"(sriov, but exclusive and without vlan tag)
+                bandwidth: 1 Gbps
+
+        bridge-ifaces:
+        -   name:      eth0
+            vpci:      "0000:00:0a.0"
+
diff --git a/RO/test/RO_tests/simple_2_vnf/scenario_simple_2_vnf.yaml b/RO/test/RO_tests/simple_2_vnf/scenario_simple_2_vnf.yaml
new file mode 100644 (file)
index 0000000..e05d416
--- /dev/null
@@ -0,0 +1,37 @@
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version:  2
+scenario:
+  name:          simple
+  description:   Simple network scenario consisting of two VNF connected to an external network
+  vnfs: 
+    linux1:                   # vnf/net name in the scenario
+      vnf_name:  linux        # VNF name as introduced in OPENMANO DB
+    linux2:                   # vnf/net name in the scenario
+      vnf_name:  linux        # VNF name as introduced in OPENMANO DB
+  networks: 
+    mgmt:                   # provide a name for this net or connection
+      external:  true
+      interfaces: 
+      - linux1:  eth0       # Node and its interface
+      - linux2:  eth0       # Node and its interface
+
diff --git a/RO/test/RO_tests/simple_2_vnf/vnfd_linux.yaml b/RO/test/RO_tests/simple_2_vnf/vnfd_linux.yaml
new file mode 100644 (file)
index 0000000..45c670f
--- /dev/null
@@ -0,0 +1,42 @@
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+    name:        linux
+    description: Single-VM VNF with a traditional cloud VM based on generic Linux OS
+    external-connections:
+    -   name:              eth0
+        type:              bridge
+        VNFC:              linux-VM
+        local_iface_name:  eth0
+        description:       General purpose interface
+    VNFC:
+    -   name:        linux-VM
+        description: Generic Linux Virtual Machine
+        #Copy the image to a compute path and edit this path
+        image name:  image_name.qcow2
+        vcpus: 1          # Only for traditional cloud VMs. Number of virtual CPUs (oversubscription is allowed).
+        ram: 1024         # Only for traditional cloud VMs. Memory in MBytes (not from hugepages, oversubscription is allowed)
+        disk: 10
+        bridge-ifaces:
+        -   name:      eth0
+            vpci:      "0000:00:11.0"
+        numas: []
diff --git a/RO/test/RO_tests/simple_cloud_init/scenario_simple-cloud-init.yaml b/RO/test/RO_tests/simple_cloud_init/scenario_simple-cloud-init.yaml
new file mode 100644 (file)
index 0000000..77fc17e
--- /dev/null
@@ -0,0 +1,34 @@
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version:  2
+scenario:
+  name:          simple-cloud-init
+  description:   Simple network scenario consisting of a single VNF connected to an external network
+  vnfs: 
+    linux1:                   # vnf/net name in the scenario
+      vnf_name:  linux-cloud-init       # VNF name as introduced in OPENMANO DB
+  networks: 
+    mgmt:                   # provide a name for this net or connection
+      external:  true
+      interfaces: 
+      - linux1:  eth0       # Node and its interface
+
diff --git a/RO/test/RO_tests/simple_cloud_init/vnfd_linux-cloud-init.yaml b/RO/test/RO_tests/simple_cloud_init/vnfd_linux-cloud-init.yaml
new file mode 100644 (file)
index 0000000..aa415cf
--- /dev/null
@@ -0,0 +1,67 @@
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+---
+schema_version: "0.2"
+vnf:
+    name:        linux-cloud-init
+    description: Single-VM VNF with a traditional cloud VM based on generic Linux OS
+    external-connections:
+    -   name:              eth0
+        type:              mgmt
+        description:       General purpose interface
+        VNFC:              linux-VM
+        local_iface_name:  eth0
+    VNFC:
+    -   name:        linux-VM
+        description: Generic Linux Virtual Machine
+        #Copy the image to a compute path and edit this path
+        image name:  ubuntu16.04
+        vcpus: 1          # Only for traditional cloud VMs. Number of virtual CPUs (oversubscription is allowed).
+        ram:   2048         # Only for traditional cloud VMs. Memory in MBytes (not from hugepages, oversubscription is allowed)
+        disk:  20
+        bridge-ifaces:
+        -   name:      eth0
+            vpci:      "0000:00:11.0"
+        numas: []
+        boot-data: 
+            key-pairs: 
+            -  ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCy2w9GHMKKNkpCmrDK2ovc3XBYDETuLWwaW24S+feHhLBQiZlzh3gSQoINlA+2ycM9zYbxl4BGzEzpTVyCQFZv5PidG4m6ox7LR+KYkDcITMyjsVuQJKDvt6oZvRt6KbChcCi0n2JJD/oUiJbBFagDBlRslbaFI2mmqmhLlJ5TLDtmYxzBLpjuX4m4tv+pdmQVfg7DYHsoy0hllhjtcDlt1nn05WgWYRTu7mfQTWfVTavu+OjIX3e0WN6NW7yIBWZcE/Q9lC0II3W7PZDE3QaT55se4SPIO2JTdqsx6XGbekdG1n6adlduOI27sOU5m4doiyJ8554yVbuDB/z5lRBD alfonso.tiernosepulveda@telefonica.com
+            users:
+            -  name: atierno
+               key-pairs: 
+               -  ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCy2w9GHMKKNkpCmrDK2ovc3XBYDETuLWwaW24S+feHhLBQiZlzh3gSQoINlA+2ycM9zYbxl4BGzEzpTVyCQFZv5PidG4m6ox7LR+KYkDcITMyjsVuQJKDvt6oZvRt6KbChcCi0n2JJD/oUiJbBFagDBlRslbaFI2mmqmhLlJ5TLDtmYxzBLpjuX4m4tv+pdmQVfg7DYHsoy0hllhjtcDlt1nn05WgWYRTu7mfQTWfVTavu+OjIX3e0WN6NW7yIBWZcE/Q9lC0II3W7PZDE3QaT55se4SPIO2JTdqsx6XGbekdG1n6adlduOI27sOU5m4doiyJ8554yVbuDB/z5lRBD alfonso.tiernosepulveda@telefonica.com
+            boot-data-drive: true
+            config-files: 
+            -   content: |
+                       auto enp0s3
+                       iface enp0s3 inet dhcp
+                dest: /etc/network/interfaces.d/enp0s3.cfg
+                permissions: '0644'
+                owner: root:root
+            -   content: |
+                       #! /bin/bash
+                       ls -al >> /var/log/osm.log
+                dest: /etc/rc.local
+                permissions: '0755'
+            -   content: "file content"
+                dest: /etc/test_delete
+
diff --git a/RO/test/RO_tests/simple_count3/scenario_linux_count3.yaml b/RO/test/RO_tests/simple_count3/scenario_linux_count3.yaml
new file mode 100644 (file)
index 0000000..0a4116c
--- /dev/null
@@ -0,0 +1,39 @@
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version:  2
+scenario:
+  name:          simple_count3
+  description:   Simple network scenario consisting of a multi VNFC VNF connected to an external network
+  vnfs: 
+    linux1:                   # vnf/net name in the scenario
+      vnf_name:  simple_linux_count3        # VNF name as introduced in OPENMANO DB
+  networks: 
+    mgmt:                   # provide a name for this net or connection
+      external:  true
+      interfaces: 
+      - linux1:  control0       # Node and its interface
+    internal1:                   # provide a name for this net or connection
+      external:  false
+      interfaces: 
+      - linux1:  data-eth1
+
+
diff --git a/RO/test/RO_tests/simple_count3/vnfd_count3.yaml b/RO/test/RO_tests/simple_count3/vnfd_count3.yaml
new file mode 100644 (file)
index 0000000..712d392
--- /dev/null
@@ -0,0 +1,68 @@
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version: "0.2"
+vnf:
+    name:        simple_linux_count3
+    description: "Example of a linux VNF consisting of two VMs with one internal network"
+    # class: parent      # Optional. Used to organize VNFs
+    internal-connections:
+    -   name:        internal-eth2
+        description: internalnet
+        type:        e-lan
+        implementation: overlay
+        ip-profile:
+            ip-version:       IPv4
+            subnet-address:   192.168.1.0/24
+            gateway-address:  192.168.1.1
+            dns-address:      8.8.8.8
+            dhcp:
+                enabled: true
+                start-address: 192.168.1.100
+                count: 100
+        elements:
+        -   VNFC:             linux_3VMs
+            local_iface_name: eth2
+            ip_address:       192.168.1.2
+    external-connections:
+    -   name:              control0
+        type:              mgmt
+        VNFC:              linux_3VMs
+        local_iface_name:  eth0
+        description:       control interface VM1
+    -   name:              data-eth1
+        type:              bridge
+        VNFC:              linux_3VMs
+        local_iface_name:  eth1
+        description:       data interface input
+    VNFC:
+    -   name:        linux_3VMs
+        count:       3
+        description: "Linux VM1 2 CPUs, 2 GB RAM and 3 bridge interfaces"
+        #Copy the image to a compute path and edit this path
+        image name:  TestVM
+        disk: 10
+        vcpus: 2
+        ram: 2048
+        bridge-ifaces:
+        -   name:      eth0
+        -   name:      eth1
+        -   name:      eth2
diff --git a/RO/test/RO_tests/simple_linux/scenario_simple_linux.yaml b/RO/test/RO_tests/simple_linux/scenario_simple_linux.yaml
new file mode 100644 (file)
index 0000000..b6cff70
--- /dev/null
@@ -0,0 +1,46 @@
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+nsd:nsd-catalog:
+    nsd:
+    -   id: simple
+        name: simple
+        vendor:      OSM
+        version:     '1.0'
+        description:   Simple network scenario consisting of a single VNF connected to an external network
+        constituent-vnfd:
+        # The member-vnf-index needs to be unique, starting from 1
+        # vnfd-id-ref is the id of the VNFD
+        # Multiple constituent VNFDs can be specified
+        -   member-vnf-index: 1
+            vnfd-id-ref: linux
+        vld:
+        # Networks for the VNFs
+        -   id: vld1
+            name: mgmt
+            short-name: vld1-sname
+            type: ELAN
+            mgmt-network: 'true'
+            vnfd-connection-point-ref:
+            -   member-vnf-index-ref: 1
+                vnfd-id-ref: linux
+                vnfd-connection-point-ref: eth0
+
diff --git a/RO/test/RO_tests/simple_linux/vnfd_linux.yaml b/RO/test/RO_tests/simple_linux/vnfd_linux.yaml
new file mode 100644 (file)
index 0000000..a666124
--- /dev/null
@@ -0,0 +1,46 @@
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnfd-catalog:
+    vnfd:
+     -  id: linux
+        name: linux
+        description: Single-VM VNF with a traditional cloud VM based on generic Linux OS
+        connection-point:
+        -   name: eth0
+            type: VPORT
+        vdu:
+        -   id: linux-VM
+            name: linux-VM
+            description: Generic Linux Virtual Machine
+            #Copy the image to a compute path and edit this path
+            image:  image_name.qcow2
+            vm-flavor:
+                  memory-mb: '1024'
+                  storage-gb: '10'
+                  vcpu-count: '1'
+            interface:
+            -   name: eth0
+                type: EXTERNAL
+                virtual-interface:
+                    type: VIRTIO
+                    vpci:      "0000:00:11.0"
+                external-connection-point-ref: eth0
diff --git a/RO/test/RO_tests/simple_multi_vnfc/scenario_multi_vnfc.yaml b/RO/test/RO_tests/simple_multi_vnfc/scenario_multi_vnfc.yaml
new file mode 100644 (file)
index 0000000..07b8902
--- /dev/null
@@ -0,0 +1,49 @@
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+nsd:nsd-catalog:
+    nsd:
+    -   id: simple_multi_vnfc
+        name: simple_multi_vnfc
+        vendor:      OSM
+        version:     '1.0'
+        description:   Simple network scenario consisting of a multi VNFC VNF connected to an external network
+        constituent-vnfd:
+        # The member-vnf-index needs to be unique, starting from 1
+        # vnfd-id-ref is the id of the VNFD
+        # Multiple constituent VNFDs can be specified
+        -   member-vnf-index: 1
+            vnfd-id-ref: linux_2VMs_v02
+        vld:
+        # Networks for the VNFs
+        -   id: vld1
+            name: mgmt
+            short-name: vld1-sname
+            type: ELAN
+            mgmt-network: 'true'
+            vnfd-connection-point-ref:
+            -   member-vnf-index-ref: 1
+                vnfd-id-ref: linux_2VMs_v02
+                vnfd-connection-point-ref: eth0
+            -   member-vnf-index-ref: 1
+                vnfd-id-ref: linux_2VMs_v02
+                vnfd-connection-point-ref: xe1
+
diff --git a/RO/test/RO_tests/simple_multi_vnfc/vnfd_linux_2VMs_v02.yaml b/RO/test/RO_tests/simple_multi_vnfc/vnfd_linux_2VMs_v02.yaml
new file mode 100644 (file)
index 0000000..8d541c6
--- /dev/null
@@ -0,0 +1,124 @@
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnfd-catalog:
+    vnfd:
+     -  id: linux_2VMs_v02
+        name: linux_2VMs_v02
+        description: "Example of a linux VNF consisting of two VMs with one internal network"
+        connection-point:
+        -   id: eth0
+            name: eth0
+            short-name: eth0
+            type: VPORT
+        -   id: xe1
+            name: xe1
+            short-name: xe1
+            type: VPORT
+        internal-vld:
+        -   id: internalnet
+            name: internalnet
+            short-name: internalnet
+            ip-profile-ref: ip-prof1
+            type: ELAN
+            internal-connection-point:
+            -   id-ref: VM1-xe0
+            -   id-ref: VM2-xe0
+        ip-profiles:
+        -   name: ip-prof1
+            description: IP profile
+            gateway-address:  192.168.1.1
+            dns-address: 8.8.8.8
+            #-   address: 8.8.8.8
+            ip-profile-params:
+            ip-version: ipv4
+            subnet-address: 192.168.1.0/24
+            dhcp-params:
+                enabled: true
+                start-address: 192.168.1.100
+                count: 100
+        vdu:
+        -   id: linux_2VMs-VM1
+            name: linux_2VMs-VM1
+            description: Generic Linux Virtual Machine
+            #Copy the image to a compute path and edit this path
+            image:  TestVM
+            vm-flavor:
+                  memory-mb: '2048'
+                  storage-gb: '10'
+                  vcpu-count: '4'
+            interface:
+            -   name: eth0
+                type: EXTERNAL
+                virtual-interface:
+                    type: VIRTIO
+                    vpci:      "0000:00:09.0"
+                external-connection-point-ref: eth0
+            -   name: xe0
+                type: INTERNAL
+                virtual-interface:
+                    type: VIRTIO
+                    vpci:      "0000:00:11.0"
+                internal-connection-point-ref: VM1-xe0
+            -   name: xe1
+                type: EXTERNAL
+                virtual-interface:
+                    type: VIRTIO
+                    vpci:      "0000:00:12.0"
+                external-connection-point-ref: xe1
+            internal-connection-point:
+            - id: VM1-xe0
+              name: VM1-xe0
+              short-name: VM1-xe0
+              type: VPORT
+        -   id: linux_2VMs-VM2
+            name: linux_2VMs-VM2
+            description: Generic Linux Virtual Machine
+            #Copy the image to a compute path and edit this path
+            image:  TestVM
+            vm-flavor:
+                memory-mb: '2048'
+                storage-gb: '10'
+                vcpu-count: '4'
+            interface:
+            -   name: eth0
+                type: EXTERNAL
+                virtual-interface:
+                    type: VIRTIO
+                    vpci:      "0000:00:09.0"
+                external-connection-point-ref: eth0
+            -   name: xe0
+                type: INTERNAL
+                virtual-interface:
+                    type: VIRTIO
+                    vpci:      "0000:00:11.0"
+                internal-connection-point-ref: VM2-xe0
+            -   name: xe1
+                type: EXTERNAL
+                virtual-interface:
+                    type: VIRTIO
+                    vpci:      "0000:00:12.0"
+                external-connection-point-ref: xe1
+            internal-connection-point:
+            -   id: VM2-xe0
+                name: VM2-xe0
+                short-name: VM2-xe0
+                type: VPORT
diff --git a/RO/test/RO_tests/sr_iov/scenario_p2p_sriov.yaml b/RO/test/RO_tests/sr_iov/scenario_p2p_sriov.yaml
new file mode 100644 (file)
index 0000000..a0b888c
--- /dev/null
@@ -0,0 +1,41 @@
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version:  2
+scenario:
+  name:          p2p_sriov
+  description:   Network scenario consisting of two machines with a sr-iov interconnected between them
+  vnfs: 
+    sriov1:                   # vnf/net name in the scenario
+      vnf_name:  sriov        # VNF name as introduced in OPENMANO DB
+    sriov2:                   # vnf/net name in the scenario
+      vnf_name:  sriov        # VNF name as introduced in OPENMANO DB
+  networks: 
+    mgmt:                   # provide a name for this net or connection
+      external:  true
+      interfaces: 
+      - sriov1:  eth0       # Node and its interface
+      - sriov2:  eth0       # Node and its interface
+    dataplane:                   # provide a name for this net or connection
+      interfaces: 
+      - sriov1:  xe0       # Node and its interface
+      - sriov2:  xe0       # Node and its interface
+
diff --git a/RO/test/RO_tests/sr_iov/vnfd_1sriov.yaml b/RO/test/RO_tests/sr_iov/vnfd_1sriov.yaml
new file mode 100644 (file)
index 0000000..2d4ad5b
--- /dev/null
@@ -0,0 +1,53 @@
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+    name:        sriov
+    description: Machine with EPA and a SR-IOV interface
+    external-connections:
+    -   name:              eth0
+        type:              bridge
+        VNFC:              sriov-VM
+        local_iface_name:  eth0
+        description:       management interface
+    -   name:              xe0
+        type:              data
+        VNFC:              sriov-VM
+        local_iface_name:  xe0
+        description:       Dataplane interface
+    VNFC:
+    -   name:        sriov-VM
+        description: Machine with EPA and a SR-IOV interface
+        image name:  centos
+        disk: 20
+        numas: 
+        -   threads: 1          # "cores", "paired-threads", "threads"
+            memory: 1                 # GBytes
+            interfaces:
+            -   name:      xe0
+                vpci:      "0000:00:11.0"
+                dedicated: "no"         # "yes"(passthrough), "no"(sriov with vlan tags), "yes:sriov"(sriov, but exclusive and without vlan tag)
+                bandwidth: 1 Gbps
+
+        bridge-ifaces:
+        -   name:      eth0
+            vpci:      "0000:00:0a.0"
+
diff --git a/RO/test/RO_tests/sriov_passthrough/scenario_p2p_sriov_passthrough.yaml b/RO/test/RO_tests/sriov_passthrough/scenario_p2p_sriov_passthrough.yaml
new file mode 100644 (file)
index 0000000..29bd4c8
--- /dev/null
@@ -0,0 +1,41 @@
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version:  2
+scenario:
+  name:          p2p_sriov_passthrough
+  description:   Network scenario consisting of two machines with a sr-iov interconnected between them
+  vnfs: 
+    sriov:                   # vnf/net name in the scenario
+      vnf_name:  sriov        # VNF name as introduced in OPENMANO DB
+    passthrough:                   # vnf/net name in the scenario
+      vnf_name:  passthrough        # VNF name as introduced in OPENMANO DB
+  networks: 
+    mgmt:                   # provide a name for this net or connection
+      external:  true
+      interfaces: 
+      - sriov:  eth0       # Node and its interface
+      - passthrough:  eth0       # Node and its interface
+    dataplane:                   # provide a name for this net or connection
+      interfaces: 
+      - sriov:  xe0       # Node and its interface
+      - passthrough:  xe0       # Node and its interface
+
diff --git a/RO/test/RO_tests/sriov_passthrough/vnfd_1passthrough.yaml b/RO/test/RO_tests/sriov_passthrough/vnfd_1passthrough.yaml
new file mode 100644 (file)
index 0000000..ab24adf
--- /dev/null
@@ -0,0 +1,53 @@
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+    name:        passthrough
+    description: Machine with EPA and a SR-IOV interface
+    external-connections:
+    -   name:              eth0
+        type:              bridge
+        VNFC:              passthrough-VM
+        local_iface_name:  eth0
+        description:       management interface
+    -   name:              xe0
+        type:              data
+        VNFC:              passthrough-VM
+        local_iface_name:  xe0
+        description:       Dataplane interface
+    VNFC:
+    -   name:        passthrough-VM
+        description: Machine with EPA and a SR-IOV interface
+        image name:  centos
+        disk: 20
+        numas: 
+        -   threads: 1          # "cores", "paired-threads", "threads"
+            memory: 1                 # GBytes
+            interfaces:
+            -   name:      xe0
+                vpci:      "0000:00:11.0"
+                dedicated: "yes"         # "yes"(passthrough), "no"(sriov with vlan tags), "yes:sriov"(sriov, but exclusive and without vlan tag)
+                bandwidth: 1 Gbps
+
+        bridge-ifaces:
+        -   name:      eth0
+            vpci:      "0000:00:0a.0"
+
diff --git a/RO/test/RO_tests/sriov_passthrough/vnfd_1sriov.yaml b/RO/test/RO_tests/sriov_passthrough/vnfd_1sriov.yaml
new file mode 100644 (file)
index 0000000..2d4ad5b
--- /dev/null
@@ -0,0 +1,53 @@
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+    name:        sriov
+    description: Machine with EPA and a SR-IOV interface
+    external-connections:
+    -   name:              eth0
+        type:              bridge
+        VNFC:              sriov-VM
+        local_iface_name:  eth0
+        description:       management interface
+    -   name:              xe0
+        type:              data
+        VNFC:              sriov-VM
+        local_iface_name:  xe0
+        description:       Dataplane interface
+    VNFC:
+    -   name:        sriov-VM
+        description: Machine with EPA and a SR-IOV interface
+        image name:  centos
+        disk: 20
+        numas: 
+        -   threads: 1          # "cores", "paired-threads", "threads"
+            memory: 1                 # GBytes
+            interfaces:
+            -   name:      xe0
+                vpci:      "0000:00:11.0"
+                dedicated: "no"         # "yes"(passthrough), "no"(sriov with vlan tags), "yes:sriov"(sriov, but exclusive and without vlan tag)
+                bandwidth: 1 Gbps
+
+        bridge-ifaces:
+        -   name:      eth0
+            vpci:      "0000:00:0a.0"
+
diff --git a/RO/test/RO_tests/v3_2vdu_set_ip_mac/scenario_2vdu_set_ip_mac.yaml b/RO/test/RO_tests/v3_2vdu_set_ip_mac/scenario_2vdu_set_ip_mac.yaml
new file mode 100644 (file)
index 0000000..fb76079
--- /dev/null
@@ -0,0 +1,84 @@
+nsd:nsd-catalog:
+    nsd:
+    -   id:          test_2vdu_nsd
+        name:        test_2vdu_nsd_name
+        short-name:  test_2vdu_nsd_sname
+        description: 2 vnfs, each one with 3 cirros vdu
+        vendor:      OSM
+        version:     '1.0'
+
+        # Place the logo as png in icons directory and provide the name here
+        logo:        osm_2x.png
+
+        # Specify the VNFDs that are part of this NSD
+        constituent-vnfd:
+            # The member-vnf-index needs to be unique, starting from 1
+            # vnfd-id-ref is the id of the VNFD
+            # Multiple constituent VNFDs can be specified
+        -   member-vnf-index: 1
+            vnfd-id-ref: test_2vdu
+        -   member-vnf-index: 2
+            vnfd-id-ref: test_2vdu2
+
+        ip-profiles:
+        -   description: Inter VNF Link
+            ip-profile-params:
+                gateway-address: 10.31.31.254
+                ip-version:      ipv4
+                subnet-address:  10.31.31.0/24
+                dns-server:
+                -   address: 8.8.8.8
+                -   address: 8.8.8.9 
+                dhcp-params:
+                  count: 200
+                  start-address: 10.31.31.20
+            name: ipprofileA
+        -   description: IP profile that disables dhcp server
+            ip-profile-params:
+                dhcp-params:
+                    enabled: 'false'
+                ip-version: ipv4
+            name: no_dhcp
+
+        vld:
+        # Networks for the VNFs
+        -   id:         vld1
+            name:       mgmt
+            short-name: vld1-sname
+            type:       ELAN
+            mgmt-network: 'true'
+            vnfd-connection-point-ref:
+            -   member-vnf-index-ref: 1
+                vnfd-id-ref: test_2vdu
+                vnfd-connection-point-ref: eth0
+            -   member-vnf-index-ref: 2
+                vnfd-id-ref: test_2vdu2
+                vnfd-connection-point-ref: eth0
+
+        -   id:         vld2
+            name:       nsd-vld2
+            short-name: vld2-sname
+            type:       ELAN
+            ip-profile-ref: ipprofileA
+            vnfd-connection-point-ref:
+            -   member-vnf-index-ref:      1
+                vnfd-id-ref:               test_2vdu
+                vnfd-connection-point-ref: eth1
+                ip-address:                10.31.31.4
+            -   member-vnf-index-ref:      2
+                vnfd-id-ref:               test_2vdu2
+                vnfd-connection-point-ref: eth1
+                ip-address:                10.31.31.5
+
+        -   id:         vld3
+            name:       nsd-vld3
+            short-name: vld3-sname
+            type:       ELAN
+            ip-profile-ref: no_dhcp
+            vnfd-connection-point-ref:
+            -   member-vnf-index-ref:      1
+                vnfd-id-ref:               test_2vdu
+                vnfd-connection-point-ref: eth4
+            -   member-vnf-index-ref:      2
+                vnfd-id-ref:               test_2vdu2
+                vnfd-connection-point-ref: eth4
diff --git a/RO/test/RO_tests/v3_2vdu_set_ip_mac/vnfd_2vdu_set_ip_mac.yaml b/RO/test/RO_tests/v3_2vdu_set_ip_mac/vnfd_2vdu_set_ip_mac.yaml
new file mode 100644 (file)
index 0000000..e790a9c
--- /dev/null
@@ -0,0 +1,93 @@
+vnfd-catalog:
+    vnfd:
+    -   connection-point:
+        -   name: eth0
+            type: VPORT
+        -   name: eth1
+            type: VPORT
+        -   name: eth4
+            type: VPORT
+        description: VNF with internal VLD and set IP and mac
+        id: test_2vdu
+        name: test_2vdu_name
+        short-name: test_2vdu_sname
+        mgmt-interface:
+            cp: eth0
+        internal-vld:
+        -   description: Internal VL
+            id:          net_internal
+            name:        internal_vld1
+            short-name:  net_internal_sname
+            type:        ELAN
+            internal-connection-point:
+            -   id-ref:     eth2
+                ip-address: 10.10.135.4
+            -   id-ref:     eth3
+                ip-address: 10.10.135.5
+            ip-profile-ref: ip-profile1
+        ip-profiles:
+        -   description: Inter VNF Link
+            ip-profile-params:
+                gateway-address: null
+                ip-version:      ipv4
+                subnet-address:  10.10.135.0/24
+                dhcp-params:
+                  count:         100
+                  start-address: 10.10.135.20
+            name: ip-profile1
+        vdu:
+        -   id: VM1
+            name: VM11
+            image: US1604
+            interface:
+            -   name: iface11
+                type: EXTERNAL
+                virtual-interface:
+                    type: VIRTIO
+                external-connection-point-ref: eth0
+                mac-address:   "52:33:44:55:66:77"
+            -   name: iface12
+                type: INTERNAL
+                virtual-interface:
+                    type: VIRTIO
+                internal-connection-point-ref: eth2
+                mac-address:   "52:33:44:55:66:78"
+            -   name: iface13
+                type: EXTERNAL
+                virtual-interface:
+                    type: VIRTIO
+                external-connection-point-ref: eth4
+            internal-connection-point:
+            -   name: eth2-icp
+                id:   eth2
+                type: VPORT
+            vm-flavor:
+                memory-mb: '2048'
+                storage-gb: '8'
+                vcpu-count: '1'
+        -   id: VM2
+            image: US1604
+            name: VM12
+            interface:
+            -   name: iface21
+                type: EXTERNAL
+                virtual-interface:
+                    type: VIRTIO
+                external-connection-point-ref: eth1
+                mac-address:   52:33:44:55:66:79
+            -   name: iface22
+                type: INTERNAL
+                virtual-interface:
+                    type: VIRTIO
+                internal-connection-point-ref: eth3
+                mac-address:   52:33:44:55:66:80
+            internal-connection-point:
+            -   name: eth3-icp
+                id:   eth3
+                type: VPORT
+            vm-flavor:
+                memory-mb: '2048'
+                storage-gb: '8'
+                vcpu-count: '1'
+        vendor: ROtest
+        version: '1.0'
diff --git a/RO/test/RO_tests/v3_2vdu_set_ip_mac/vnfd_2vdu_set_ip_mac2.yaml b/RO/test/RO_tests/v3_2vdu_set_ip_mac/vnfd_2vdu_set_ip_mac2.yaml
new file mode 100644 (file)
index 0000000..6c4b6cf
--- /dev/null
@@ -0,0 +1,93 @@
+vnfd-catalog:
+    vnfd:
+    -   connection-point:
+        -   name: eth0
+            type: VPORT
+        -   name: eth1
+            type: VPORT
+        -   name: eth4
+            type: VPORT
+        description: VNF with internal VLD and set IP and mac
+        id: test_2vdu2
+        name: test_2vdu2_name
+        short-name: test_2vdu2_sname
+        mgmt-interface:
+            cp: eth0
+        internal-vld:
+        -   description: Internal VL
+            id:          net_internal
+            name:        internal_vld2
+            short-name:  net_internal_sname
+            type:        ELAN
+            internal-connection-point:
+            -   id-ref:     eth2
+                ip-address: 10.10.133.4
+            -   id-ref:     eth3
+                ip-address: 10.10.133.5
+            ip-profile-ref: ip-profile1
+        ip-profiles:
+        -   description: Inter VNF Link
+            ip-profile-params:
+                gateway-address: 10.10.133.1
+                ip-version:      ipv4
+                subnet-address:  10.10.133.0/24
+                dhcp-params:
+                  count:         200
+                  start-address: 10.10.133.20
+            name: ip-profile1
+        vdu:
+        -   id: VM1
+            name: VM21
+            image: US1604
+            interface:
+            -   name: iface11
+                type: EXTERNAL
+                virtual-interface:
+                    type: VIRTIO
+                external-connection-point-ref: eth0
+                mac-address:   "52:33:44:55:66:81"
+            -   name: iface12
+                type: INTERNAL
+                virtual-interface:
+                    type: VIRTIO
+                internal-connection-point-ref: eth2
+                mac-address:   "52:33:44:55:66:82"
+            -   name: iface13
+                type: EXTERNAL
+                virtual-interface:
+                    type: VIRTIO
+                external-connection-point-ref: eth4
+            internal-connection-point:
+            -   name: eth2-icp
+                id:   eth2
+                type: VPORT
+            vm-flavor:
+                memory-mb: '2048'
+                storage-gb: '8'
+                vcpu-count: '1'
+        -   id: VM2
+            image: US1604
+            name: VM22
+            interface:
+            -   name: iface21
+                type: EXTERNAL
+                virtual-interface:
+                    type: VIRTIO
+                external-connection-point-ref: eth1
+                mac-address:   52:33:44:55:66:83
+            -   name: iface22
+                type: INTERNAL
+                virtual-interface:
+                    type: VIRTIO
+                internal-connection-point-ref: eth3
+                mac-address:   52:33:44:55:66:84
+            internal-connection-point:
+            -   name: eth3-icp
+                id:   eth3
+                type: VPORT
+            vm-flavor:
+                memory-mb: '2048'
+                storage-gb: '8'
+                vcpu-count: '1'
+        vendor: ROtest
+        version: '1.0'
diff --git a/RO/test/basictest.sh b/RO/test/basictest.sh
new file mode 100755 (executable)
index 0000000..8f5a225
--- /dev/null
@@ -0,0 +1,310 @@
+#!/bin/bash
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+#This script can be used as a basic test of openmano.
+#WARNING: It destroys the database content
+
+
+function usage(){
+    echo -e "usage: ${BASH_SOURCE[0]} [OPTIONS] <action>\n  test openmano using openvim as a VIM"
+    echo -e "           the OPENVIM_HOST, OPENVIM_PORT shell variables indicate openvim location"
+    echo -e "           by default localhost:9080"
+    echo -e "  <action> is a list of the following items (by default 'reset add-openvim create delete del-openvim')"
+    echo -e "    reset       resets the openmano database content and creates osm tenant"
+    echo -e "    add-openvim adds and attaches a local openvim datacenter"
+    echo -e "    del-openvim detaches and deletes the local openvim datacenter"
+    echo -e "    create      creates VNFs, scenarios and instances"
+    echo -e "    delete      deletes the created instances, scenarios and VNFs"
+    echo -e "    delete-all  deletes ALL the existing instances, scenarios and vnf at the current tenant"
+    echo -e "  OPTIONS:"
+    echo -e "    -f --force       does not prompt for confirmation"
+    echo -e "    -h --help        shows this help"
+    echo -e "    --screen         forces to run openmano (and openvim) service in a screen"
+    echo -e "    --insert-bashrc  insert the created tenant,datacenter variables at"
+    echo -e "                     ~/.bashrc to be available by openmano CLI"
+    echo -e "    --install-openvim   installs openvim in test mode"
+    echo -e "    --init-openvim --initopenvim    if openvim runs locally, initopenvim is called to clean openvim"\
+            "database, create osm tenant and add fake hosts"
+}
+
+function is_valid_uuid(){
+    echo "$1" | grep -q -E '^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$' && return 0
+    return 1
+}
+
+#detect if is called with a source to use the 'exit'/'return' command for exiting
+DIRNAME=$(dirname $(readlink -f ${BASH_SOURCE[0]}))
+DIRmano=$(dirname $DIRNAME)
+DIRscript=${DIRmano}/scripts
+
+#detect paths of executables, preceding the relative paths
+openmano=openmano && [[ -x "${DIRmano}/openmano" ]] && openmano="${DIRmano}/openmano"
+service_openmano=service-openmano && [[ -x "$DIRscript/service-openmano" ]] &&
+    service_openmano="$DIRscript/service-openmano"
+initopenvim="initopenvim"
+openvim="openvim"
+
+[[ ${BASH_SOURCE[0]} != $0 ]] && _exit="return" || _exit="exit"
+
+
+#process options
+source ${DIRscript}/get-options.sh "force:f help:h insert-bashrc init-openvim:initopenvim install-openvim screen" \
+                $* || $_exit 1
+
+#help
+[ -n "$option_help" ] && usage && $_exit 0
+
+#check correct arguments
+force_param="" && [[ -n "$option_force" ]] && force_param=" -f"
+insert_bashrc_param="" && [[ -n "$option_insert_bashrc" ]] && insert_bashrc_param=" --insert-bashrc"
+screen_mano_param="" && [[ -n "$option_screen" ]] && screen_mano_param=" --screen-name=mano" 
+screen_vim_param=""  && [[ -n "$option_screen" ]] && screen_vim_param=" --screen-name=vim" 
+
+action_list=""
+
+for argument in $params
+do
+    if [[ $argument == reset ]] || [[ $argument == create ]] || [[ $argument == delete ]] ||
+       [[ $argument == add-openvim ]] || [[ $argument == del-openvim ]] ||  [[ $argument == delete-all ]] ||
+       [[ -z "$argument" ]]
+    then
+        action_list="$action_list $argument"
+        continue
+    fi
+    echo "invalid argument '$argument'?  Type -h for help" >&2 && $_exit 1
+done
+
+export OPENMANO_HOST=localhost
+export OPENMANO_PORT=9090
+[[ -n "$option_insert_bashrc" ]] && echo -e "\nexport OPENMANO_HOST=localhost"  >> ~/.bashrc
+[[ -n "$option_insert_bashrc" ]] && echo -e "\nexport OPENMANO_PORT=9090"  >> ~/.bashrc
+
+
+#by default action should be reset and create
+[[ -z $action_list ]]  && action_list="reset add-openvim create delete del-openvim"
+
+if [[ -n "$option_install_openvim" ]] 
+then
+    echo
+    echo "action: install openvim"
+    echo "################################"
+    mkdir -p ${DIRNAME}/local
+    pushd ${DIRNAME}/local
+    echo "installing openvim at  ${DIRNAME}/openvim ... "
+    wget -O install-openvim.sh "https://osm.etsi.org/gitweb/?p=osm/openvim.git;a=blob_plain;f=scripts/install-openvim.sh"
+    chmod +x install-openvim.sh
+    sudo ./install-openvim.sh --no-install-packages --force --quiet --develop
+    openvim="${DIRNAME}/local/openvim/openvim"
+    #force initopenvim
+    option_init_openvim="-"
+    initopenvim="${DIRNAME}/local/openvim/scripts/initopenvim"
+    popd
+fi
+
+if [[ -n "$option_init_openvim" ]]
+then
+    echo
+    echo "action: init openvim"
+    echo "################################"
+    ${initopenvim} ${force_param}${insert_bashrc_param}${screen_vim_param} || \
+        echo "WARNING openvim cannot be initialized. The rest of test can fail!"
+fi
+
+#check openvim client variables are set
+#fail=""
+#[[ -z $OPENVIM_HOST ]] && echo "OPENVIM_HOST variable not defined" >&2 && fail=1
+#[[ -z $OPENVIM_PORT ]] && echo "OPENVIM_PORT variable not defined" >&2 && fail=1
+#[[ -n $fail ]] && $_exit 1
+
+
+for action in $action_list
+do
+    echo
+    echo "action: $action"
+    echo "################################"
+#if [[ $action == "install-openvim" ]]
+    #echo "Installing and starting openvim"
+    #mkdir -p temp
+    #pushd temp
+    #wget https://github.com/nfvlabs/openvim/raw/v0.4/scripts/install-openvim.sh
+    #chmod -x install-openvim.sh
+#fi
+
+if [[ $action == "reset" ]]
+then
+
+    #ask for confirmation if argument is not -f --force
+    force_=y
+    [[ -z "$option_force" ]] && read -e -p "WARNING: reset openmano database, content will be lost!!! Continue(y/N) " force_
+    [[ $force_ != y ]] && [[ $force_ != yes ]] && echo "aborted!" && $_exit
+
+    echo "Stopping openmano"
+    $service_openmano mano stop${screen_mano_param}
+    echo "Initializing openmano database"
+    $DIRmano/database_utils/init_mano_db.sh -u mano -p manopw
+    echo "Starting openmano"
+    $service_openmano mano start${screen_mano_param}
+    echo
+    printf "%-50s" "Creating openmano tenant 'osm': "
+    result=`$openmano tenant-create osm --description="created by basictest.sh"`
+    nfvotenant=`echo $result |gawk '{print $1}'`
+    #check a valid uuid is obtained
+    ! is_valid_uuid $nfvotenant && echo "FAIL" && echo "    $result" && $_exit 1
+    export OPENMANO_TENANT=osm
+    [[ -n "$option_insert_bashrc" ]] && echo -e "\nexport OPENMANO_TENANT=osm"  >> ~/.bashrc
+    echo $nfvotenant
+
+elif [[ $action == "delete" ]]
+then
+    result=`openmano tenant-list osm`
+    nfvotenant=`echo $result |gawk '{print $1}'`
+    #check a valid uuid is obtained
+    is_valid_uuid $nfvotenant || ! echo "Tenant osm not found. Already delete?" >&2 || $_exit 1
+    export OPENMANO_TENANT=$nfvotenant
+    $openmano instance-scenario-delete -f simple-instance     || echo "fail"
+    $openmano instance-scenario-delete -f complex-instance    || echo "fail"
+    $openmano instance-scenario-delete -f complex2-instance   || echo "fail"
+    $openmano instance-scenario-delete -f complex3-instance   || echo "fail"
+    $openmano instance-scenario-delete -f complex4-instance   || echo "fail"
+    $openmano instance-scenario-delete -f complex5-instance   || echo "fail"
+    $openmano instance-scenario-delete -f 3vdu_2vnf_nsd-instance       || echo "fail"
+    $openmano scenario-delete -f simple           || echo "fail"
+    $openmano scenario-delete -f complex          || echo "fail"
+    $openmano scenario-delete -f complex2         || echo "fail"
+    $openmano scenario-delete -f complex3         || echo "fail"
+    $openmano scenario-delete -f complex4         || echo "fail"
+    $openmano scenario-delete -f complex5         || echo "fail"
+    $openmano scenario-delete -f osm_id=3vdu_2vnf_nsd  || echo "fail"
+    $openmano vnf-delete -f linux                 || echo "fail"
+    $openmano vnf-delete -f linux_2VMs_v02        || echo "fail"
+    $openmano vnf-delete -f dataplaneVNF_2VMs     || echo "fail"
+    $openmano vnf-delete -f dataplaneVNF_2VMs_v02 || echo "fail"
+    $openmano vnf-delete -f dataplaneVNF1         || echo "fail"
+    $openmano vnf-delete -f dataplaneVNF2         || echo "fail"
+    $openmano vnf-delete -f dataplaneVNF3         || echo "fail"
+    $openmano vnf-delete -f dataplaneVNF4         || echo "fail"
+    $openmano vnf-delete -f osm_id=3vdu_vnfd      || echo "fail"
+
+elif [[ $action == "delete-all" ]]
+then
+    for i in instance-scenario scenario vnf
+    do
+        for f in `$openmano $i-list | awk '{print $1}'`
+        do
+            [[ -n "$f" ]] && [[ "$f" != No ]] && $openmano ${i}-delete -f ${f}
+        done
+    done
+
+elif [[ $action == "del-openvim" ]]
+then
+    $openmano datacenter-detach local-openvim           || echo "fail"
+    $openmano datacenter-delete -f local-openvim        || echo "fail"
+
+elif [[ $action == "add-openvim" ]]
+then
+
+    printf "%-50s" "Creating datacenter 'local-openvim' at openmano:"
+    [[ -z $OPENVIM_HOST ]] && OPENVIM_HOST=localhost
+    [[ -z $OPENVIM_PORT ]] && OPENVIM_PORT=9080
+    URL_ADMIN_PARAM=""
+    [[ -n $OPENVIM_ADMIN_PORT ]] && URL_ADMIN_PARAM=" --url_admin=http://${OPENVIM_HOST}:${OPENVIM_ADMIN_PORT}/openvim"
+    result=`$openmano datacenter-create local-openvim "http://${OPENVIM_HOST}:${OPENVIM_PORT}/openvim" \
+            --type=openvim${URL_ADMIN_PARAM} --config="{test: no use just for test}"`
+    datacenter=`echo $result |gawk '{print $1}'`
+    #check a valid uuid is obtained
+    ! is_valid_uuid $datacenter && echo "FAIL" && echo "    $result" && $_exit 1
+    echo $datacenter
+    export OPENMANO_DATACENTER=local-openvim
+    [[ -n "$option_insert_bashrc" ]] && echo -e "\nexport OPENMANO_DATACENTER=local-openvim"  >> ~/.bashrc
+
+    printf "%-50s" "Attaching openmano tenant to the datacenter:"
+    result=`$openmano datacenter-attach local-openvim --vim-tenant-name=osm --config="{test: no use just for test}"`
+    [[ $? != 0 ]] && echo  "FAIL" && echo "    $result" && $_exit 1
+    echo OK
+
+    printf "%-50s" "Updating external nets in openmano: "
+    result=`$openmano datacenter-netmap-delete -f --all`
+    [[ $? != 0 ]] && echo  "FAIL" && echo "    $result"  && $_exit 1
+    result=`$openmano datacenter-netmap-import -f`
+    [[ $? != 0 ]] && echo  "FAIL" && echo "    $result"  && $_exit 1
+    echo OK
+    result=`$openmano datacenter-netmap-create --name=default --vim-name=mgmt`
+    [[ $? != 0 ]] && echo  "FAIL" && echo "    $result"  && $_exit 1
+    echo OK
+
+elif [[ $action == "create" ]]
+then
+    for VNF in linux dataplaneVNF1 dataplaneVNF2 dataplaneVNF_2VMs dataplaneVNF_2VMs_v02 dataplaneVNF3 linux_2VMs_v02 dataplaneVNF4
+    do    
+        printf "%-50s" "Creating VNF '${VNF}': "
+        result=`$openmano vnf-create $DIRmano/vnfs/examples/${VNF}.yaml`
+        vnf=`echo $result |gawk '{print $1}'`
+        #check a valid uuid is obtained
+        ! is_valid_uuid $vnf && echo FAIL && echo "    $result" &&  $_exit 1
+        echo $vnf
+    done
+
+    printf "%-50s" "Creating VNF '${VNF}': "
+    result=`$openmano vnf-create $DIRmano/vnfs/examples/v3_3vdu_vnfd.yaml --image-name=cirros034`
+    vnf=`echo $result |gawk '{print $1}'`
+    #check a valid uuid is obtained
+    ! is_valid_uuid $vnf && echo FAIL && echo "    $result" &&  $_exit 1
+    echo $vnf
+
+    for NS in simple complex complex2 complex3 complex4 complex5 v3_3vdu_2vnf_nsd
+    do
+        printf "%-50s" "Creating scenario '${NS}':"
+        result=`$openmano scenario-create $DIRmano/scenarios/examples/${NS}.yaml`
+        scenario=`echo $result |gawk '{print $1}'`
+        ! is_valid_uuid $scenario && echo FAIL && echo "    $result" &&  $_exit 1
+        echo $scenario
+    done
+
+    for IS in simple complex complex2 complex3 complex5 osm_id=3vdu_2vnf_nsd
+    do
+        printf "%-50s" "Creating instance-scenario '${IS}':"
+        result=`$openmano instance-scenario-create  --scenario ${IS} --name ${IS#osm_id=}-instance`
+        instance=`echo $result |gawk '{print $1}'`
+        ! is_valid_uuid $instance && echo FAIL && echo "    $result" &&  $_exit 1
+        echo $instance
+    done
+
+    printf "%-50s" "Creating instance-scenario 'complex4':"
+    result=`$openmano instance-scenario-create $DIRmano/instance-scenarios/examples/instance-creation-complex4.yaml`
+    instance=`echo $result |gawk '{print $1}'`
+    ! is_valid_uuid $instance && echo FAIL && echo "    $result" &&  $_exit 1
+    echo $instance
+
+    echo
+    #echo "Check virtual machines are deployed"
+    #vms_error=`openvim vm-list | grep ERROR | wc -l`
+    #vms=`openvim vm-list | wc -l`
+    #[[ $vms -ne 8 ]]       &&  echo "WARNING: $vms VMs created, must be 8 VMs" >&2 && $_exit 1
+    #[[ $vms_error -gt 0 ]] &&  echo "WARNING: $vms_error VMs with ERROR" >&2       && $_exit 1
+fi
+done
+
+echo
+echo DONE
+
+
diff --git a/RO/test/test-multivim.sh b/RO/test/test-multivim.sh
new file mode 100755 (executable)
index 0000000..077717d
--- /dev/null
@@ -0,0 +1,195 @@
+#!/bin/bash
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+#This script is a basic test for openmano that deals with two openvim instances,
+#stopping on the first error.
+#WARNING: it destroys the database content
+
+
+# Print command-line help for this script to stdout.
+# Takes no arguments and does not exit; callers decide the exit code.
+function usage(){
+    echo -e "usage: ${BASH_SOURCE[0]} [-f]\n  Deletes openvim/openmano content and make automatically the wiki steps"
+    echo -e "  at 'https://github.com/nfvlabs/openmano/wiki/Getting-started#how-to-use-it'"
+    echo -e "  OPTIONS:"
+    echo -e "    -f --force : does not prompt for confirmation"
+    echo -e "    -h --help  : shows this help"
+}
+
+# Return 0 (success) when $1 matches the canonical lowercase 8-4-4-4-12
+# hexadecimal UUID layout, 1 otherwise.
+function is_valid_uuid(){
+    echo "$1" | grep -q -E '^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$' && return 0
+    return 1
+}
+
+
+#detect if is called with a source to use the 'exit'/'return' command for exiting
+[[ ${BASH_SOURCE[0]} != $0 ]] && _exit="return" || _exit="exit"
+
+#check correct arguments
+[[ -n $1 ]] && [[ $1 != -h ]] && [[ $1 != --help ]] && [[ $1 != -f ]] && [[ $1 != --force ]] && \
+   echo "invalid argument '$1'?" &&  usage >&2 && $_exit 1
+[[ $1 == -h ]] || [[ $1 == --help ]]  && usage && $_exit 0
+
+#ask for confirmation if argument is not -f --force
+force=""
+[[ $1 == -f ]] || [[ $1 == --force ]] && force=y
+[[ $force != y ]] && read -e -p "WARNING: openmano and openvim database content will be lost!!!  Continue(y/N)" force
+[[ $force != y ]] && [[ $force != yes ]] && echo "aborted!" && $_exit
+
+DIRNAME=$(dirname $(readlink -f ${BASH_SOURCE[0]}))
+DIR_BASE=$(dirname $DIRNAME)
+DIR_BASE=$(dirname $DIR_BASE)
+DIRvim=$DIR_BASE/openvim
+DIRmano=$DIR_BASE/openmano
+DIRscripts=$DIR_BASE/scripts
+
+echo "deleting deployed vm"
+openvim vm-delete -f | grep -q deleted && sleep 10 #give some time to get virtual machines deleted
+
+echo "Stopping openmano"
+$DIRscripts/service-openmano stop
+
+echo "Initializing databases"
+$DIRvim/database_utils/init_vim_db.sh -u vim -p vimpw
+$DIRmano/database_utils/init_mano_db.sh -u mano -p manopw
+
+echo "Starting openmano"
+$DIRscripts/service-openmano start
+
+echo "Creating openmano tenant 'mytenant'"
+nfvotenant=`openmano tenant-create mytenant --description=mytenant |gawk '{print $1}'`
+#check a valid uuid is obtained
+is_valid_uuid $nfvotenant || ! echo "fail" >&2 || $_exit 1 
+export OPENMANO_TENANT=$nfvotenant
+echo "  $nfvotenant"
+
+echo "Adding example hosts"
+openvim host-add $DIRvim/test/hosts/host-example0.json || ! echo "fail" >&2 || $_exit 1
+openvim host-add $DIRvim/test/hosts/host-example1.json || ! echo "fail" >&2 || $_exit 1
+openvim host-add $DIRvim/test/hosts/host-example2.json || ! echo "fail" >&2 || $_exit 1
+openvim host-add $DIRvim/test/hosts/host-example3.json || ! echo "fail" >&2 || $_exit 1
+echo "Adding example nets"
+openvim net-create $DIRvim/test/networks/net-example0.yaml || ! echo "fail" >&2 || $_exit 1
+openvim net-create $DIRvim/test/networks/net-example1.yaml || ! echo "fail" >&2 || $_exit 1
+openvim net-create $DIRvim/test/networks/net-example2.yaml || ! echo "fail" >&2 || $_exit 1
+openvim net-create $DIRvim/test/networks/net-example3.yaml || ! echo "fail" >&2 || $_exit 1
+
+echo "Creating openvim tenant 'admin'"
+vimtenant=`openvim tenant-create '{"tenant": {"name":"admin", "description":"admin"}}' |gawk '{print $1}'`
+#check a valid uuid is obtained
+is_valid_uuid $vimtenant || ! echo "fail" >&2 || $_exit 1
+echo "  $vimtenant"
+OPENVIM_TENANT_1=$vimtenant && export OPENVIM_TENANT=$vimtenant
+
+echo "Creating datacenter 'mydc1' in openmano"
+datacenter=`openmano datacenter-create mydc1 http://localhost:9080/openvim |gawk '{print $1}'`
+#check a valid uuid is obtained
+is_valid_uuid $datacenter || ! echo "fail" >&2 || $_exit 1 
+echo "  $datacenter"
+OPENMANO_DATACENTER_1=$datacenter && export OPENMANO_DATACENTER=$datacenter
+
+echo "Attaching openmano tenant to the datacenter and the openvim tenant"
+openmano datacenter-attach mydc1 --vim-tenant-id $vimtenant || ! echo "fail" >&2 || $_exit 1 
+
+echo "Updating external nets in openmano"
+openmano datacenter-net-update -f mydc1 || ! echo "fail" >&2 || $_exit 1
+
+echo "Creating a second fake datacenter 'mydc2' in openmano"
+datacenter2=`openmano datacenter-create mydc2 http://localhost:9082/openvim |gawk '{print $1}'`
+#check a valid uuid is obtained
+is_valid_uuid $datacenter || ! echo "fail" >&2 || $_exit 1 
+echo "  $datacenter2"
+OPENMANO_DATACENTER_2=$datacenter2
+echo "Attaching a second fake openvim 'mydc2'"
+openmano datacenter-attach mydc2 --vim-tenant-id $vimtenant || ! echo "fail" >&2 || $_exit 1
+
+echo "Creating VNFs, must fail in second openvim"
+openmano vnf-create $DIRmano/vnfs/examples/linux.yaml         || ! echo "fail" >&2 || $_exit 1
+openmano vnf-create $DIRmano/vnfs/examples/dataplaneVNF1.yaml || ! echo "fail" >&2 || $_exit 1
+openmano vnf-create $DIRmano/vnfs/examples/dataplaneVNF2.yaml || ! echo "fail" >&2 || $_exit 1
+
+echo "Checking images and flavors created at openvim"
+nb=`openvim image-list | wc -l`
+echo -n " $nb images "
+[[ $nb -eq 3 ]] || ! echo "fail" >&2 || $_exit 1
+echo " $nb flavors "
+[[ $nb -eq 3 ]] || ! echo "fail" >&2 || $_exit 1
+
+echo "Creating Scenarios"
+openmano scenario-create $DIRmano/scenarios/examples/simple.yaml  || ! echo "fail" >&2 || $_exit 1
+openmano scenario-create $DIRmano/scenarios/examples/complex.yaml || ! echo "fail" >&2 || $_exit 1
+
+echo "Deleting openvim images and flavors to force reload again"
+openvim image-delete -f
+openvim flavor-delete -f
+
+echo "Launching scenarios"
+openmano scenario-deploy simple simple-instance   || ! echo "fail" >&2 || $_exit 1
+openmano scenario-deploy complex complex-instance || ! echo "fail" >&2 || $_exit 1
+
+echo "Checking that openvim has 5 VM running"
+nb=`openvim vm-list | wc -l`
+[[ $nb -eq 5 ]] || ! echo "fail" >&2 || $_exit 1
+while openvim vm-list | grep -q CREATING ; do sleep 1; done
+openvim vm-list | grep -v -q ERROR || ! echo "fail: VM with error" >&2 || $_exit 1
+
+echo "Removing scenarios"
+for scenario in `openmano instance-scenario-list  | awk '{print $2}'`
+do
+  openmano instance-scenario-delete -f $scenario
+done
+
+echo "Editing datacenters so that Changing openvim Working with the second openvim"
+openmano datacenter-edit -f mydc1 'vim_url: http://localhost:9083/openvim'
+openmano datacenter-edit -f mydc2 'vim_url: http://localhost:9080/openvim'
+export OPENMANO_DATACENTER=$OPENMANO_DATACENTER_2
+
+echo "Updating external nets in openmano for second datacenter"
+openmano datacenter-net-update -f mydc2 || ! echo "fail" >&2 || $_exit 1
+
+echo "Launching Scenario instances"
+openmano scenario-deploy simple simple-instance   || ! echo "fail" >&2 || $_exit 1
+openmano scenario-deploy complex complex-instance || ! echo "fail" >&2 || $_exit 1
+
+echo "Checking images and flavors created at openvim"
+nb=`openvim image-list | wc -l`
+echo -n " $nb images "
+[[ $nb -eq 3 ]] || ! echo "fail" >&2 || $_exit 1
+echo " $nb flavors "
+[[ $nb -eq 3 ]] || ! echo "fail" >&2 || $_exit 1
+
+echo "Checking that openvim has 5 VM running"
+nb=`openvim vm-list | wc -l`
+[[ $nb -eq 5 ]] || ! echo "fail" >&2 || $_exit 1
+while openvim vm-list | grep -q CREATING ; do sleep 1; done
+openvim vm-list | grep -v -q ERROR || ! echo "fail: VM with error" >&2 || $_exit 1
+
+
+echo
+echo DONE
+#echo "Listing VNFs"
+#openmano vnf-list
+#echo "Listing scenarios"
+#openmano scenario-list
+#echo "Listing scenario instances"
+#openmano instance-scenario-list
+
+
diff --git a/RO/test/test_RO.py b/RO/test/test_RO.py
new file mode 100755 (executable)
index 0000000..5d17087
--- /dev/null
@@ -0,0 +1,2549 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2017
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+##
+
+"""
+Module for testing openmano functionality. It uses openmanoclient.py for invoking openmano
+"""
+
+import logging
+import os
+import argcomplete
+import unittest
+import string
+import inspect
+import random
+# import traceback
+import glob
+import yaml
+import sys
+import time
+import uuid
+from argparse import ArgumentParser
+
+__author__ = "Pablo Montes, Alfonso Tierno"
+__date__ = "$16-Feb-2017 17:08:16$"
+__version__ = "0.1.0"
+version_date = "Oct 2017"
+
+test_config = {}    # used for global variables with the test configuration
+
+
+class test_base(unittest.TestCase):
+    """Common base for all test classes: numbers the tests and logs OK/NOK per case."""
+    test_index = 1      # per-class counter of executed test methods
+    test_text = None    # label "<test_number>.<test_index>. TEST <method name>" set by each test
+
+    @classmethod
+    def setUpClass(cls):
+        # Log the global test-group number and the class that is starting
+        logger.info("{}. {}".format(test_config["test_number"], cls.__name__))
+
+    @classmethod
+    def tearDownClass(cls):
+        # Advance the global test-group counter once the whole class has run
+        test_config["test_number"] += 1
+
+    def tearDown(self):
+        # sys.exc_info() is (None, None, None) when the test method raised nothing
+        exec_info = sys.exc_info()
+        if exec_info == (None, None, None):
+            logger.info(self.__class__.test_text+" -> TEST OK")
+        else:
+            logger.warning(self.__class__.test_text+" -> TEST NOK")
+            logger.critical("Traceback error",exc_info=True)
+
+
+def check_instance_scenario_active(uuid):
+    """Check that every net and every VM of the given scenario instance is ACTIVE.
+
+    Returns (True, None) when all elements are ACTIVE, otherwise
+    (False, <status>) for the first non-ACTIVE element found.
+    NOTE(review): the parameter shadows the imported 'uuid' module (harmless here).
+    """
+    instance = test_config["client"].get_instance(uuid=uuid)
+
+    for net in instance['nets']:
+        status = net['status']
+        if status != 'ACTIVE':
+            return (False, status)
+
+    for vnf in instance['vnfs']:
+        for vm in vnf['vms']:
+            status = vm['status']
+            if status != 'ACTIVE':
+                return (False, status)
+
+    return (True, None)
+
+
+'''
+IMPORTANT NOTE
+All unittest classes for code based tests must have prefix 'test_' in order to be taken into account for tests
+'''
+class test_VIM_datacenter_tenant_operations(test_base):
+    """CRUD tests for RO (openmano) tenants: create, read by name, delete."""
+    tenant_name = None  # random name shared by the three ordered test methods
+
+    def test_000_create_RO_tenant(self):
+        self.__class__.tenant_name = _get_random_string(20)
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
+                                                           inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+        # Description is set equal to the name; the answer must echo the name back
+        tenant = test_config["client"].create_tenant(name=self.__class__.tenant_name,
+                                                     description=self.__class__.tenant_name)
+        logger.debug("{}".format(tenant))
+        self.assertEqual(tenant.get('tenant', {}).get('name', ''), self.__class__.tenant_name)
+
+    def test_010_list_RO_tenant(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
+                                                           inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+        tenant = test_config["client"].get_tenant(name=self.__class__.tenant_name)
+        logger.debug("{}".format(tenant))
+        self.assertEqual(tenant.get('tenant', {}).get('name', ''), self.__class__.tenant_name)
+
+    def test_020_delete_RO_tenant(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
+                                                           inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+        tenant = test_config["client"].delete_tenant(name=self.__class__.tenant_name)
+        logger.debug("{}".format(tenant))
+        # The RO answer carries a textual 'result' such as "... deleted"
+        assert('deleted' in tenant.get('result',""))
+
+
+class test_VIM_datacenter_operations(test_base):
+    """Lifecycle tests for a (fake-URL) datacenter: create, list, attach, detach, delete."""
+    datacenter_name = None  # random name shared by the ordered test methods
+
+    def test_000_create_datacenter(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
+                                                           inspect.currentframe().f_code.co_name)
+        self.__class__.datacenter_name = _get_random_string(20)
+        self.__class__.test_index += 1
+        # The VIM URL is intentionally fake: only the RO-side bookkeeping is tested
+        self.datacenter = test_config["client"].create_datacenter(name=self.__class__.datacenter_name,
+                                                                  vim_url="http://fakeurl/fake")
+        logger.debug("{}".format(self.datacenter))
+        self.assertEqual (self.datacenter.get('datacenter', {}).get('name',''), self.__class__.datacenter_name)
+
+    def test_010_list_datacenter(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
+                                                           inspect.currentframe().f_code.co_name)
+
+        self.__class__.test_index += 1
+        # all_tenants=True: the datacenter is not yet attached to our tenant
+        self.datacenter = test_config["client"].get_datacenter(all_tenants=True, name=self.__class__.datacenter_name)
+        logger.debug("{}".format(self.datacenter))
+        self.assertEqual (self.datacenter.get('datacenter', {}).get('name', ''), self.__class__.datacenter_name)
+
+    def test_020_attach_datacenter(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
+                                                           inspect.currentframe().f_code.co_name)
+
+        self.__class__.test_index += 1
+        self.datacenter = test_config["client"].attach_datacenter(name=self.__class__.datacenter_name,
+                                                                  vim_tenant_name='fake')
+        logger.debug("{}".format(self.datacenter))
+        assert ('uuid' in self.datacenter.get('datacenter', {}))
+
+    def test_030_list_attached_datacenter(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
+                                                           inspect.currentframe().f_code.co_name)
+
+        self.__class__.test_index += 1
+        # all_tenants=False: now it must be visible from our own tenant
+        self.datacenter = test_config["client"].get_datacenter(all_tenants=False, name=self.__class__.datacenter_name)
+        logger.debug("{}".format(self.datacenter))
+        self.assertEqual (self.datacenter.get('datacenter', {}).get('name', ''), self.__class__.datacenter_name)
+
+    def test_040_detach_datacenter(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
+                                                           inspect.currentframe().f_code.co_name)
+
+        self.__class__.test_index += 1
+        self.datacenter = test_config["client"].detach_datacenter(name=self.__class__.datacenter_name)
+        logger.debug("{}".format(self.datacenter))
+        assert ('detached' in self.datacenter.get('result', ""))
+
+    def test_050_delete_datacenter(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
+                                                           inspect.currentframe().f_code.co_name)
+
+        self.__class__.test_index += 1
+        self.datacenter = test_config["client"].delete_datacenter(name=self.__class__.datacenter_name)
+        logger.debug("{}".format(self.datacenter))
+        assert('deleted' in self.datacenter.get('result',""))
+
+
+class test_VIM_network_operations(test_base):
+    """CRUD tests for VIM networks driven through the RO 'vim_action' passthrough."""
+    vim_network_name = None   # random name created by test_000
+    vim_network_uuid = None   # uuid returned by the VIM on creation
+
+    def test_000_create_VIM_network(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
+                                                           inspect.currentframe().f_code.co_name)
+        self.__class__.vim_network_name = _get_random_string(20)
+        self.__class__.test_index += 1
+        network = test_config["client"].vim_action("create", "networks", name=self.__class__.vim_network_name)
+        logger.debug("{}".format(network))
+        # Remember the uuid so the show/delete tests can address this network
+        self.__class__.vim_network_uuid = network["network"]["id"]
+        self.assertEqual(network.get('network', {}).get('name', ''), self.__class__.vim_network_name)
+
+    def test_010_list_VIM_networks(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
+                                                           inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+        # Listing only needs to run without raising; nothing asserted on content
+        networks = test_config["client"].vim_action("list", "networks")
+        logger.debug("{}".format(networks))
+
+    def test_020_get_VIM_network_by_uuid(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
+                                                           inspect.currentframe().f_code.co_name)
+
+        self.__class__.test_index += 1
+        network = test_config["client"].vim_action("show", "networks", uuid=self.__class__.vim_network_uuid)
+        logger.debug("{}".format(network))
+        self.assertEqual(network.get('network', {}).get('name', ''), self.__class__.vim_network_name)
+
+    def test_030_delete_VIM_network_by_uuid(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
+                                                           inspect.currentframe().f_code.co_name)
+
+        self.__class__.test_index += 1
+        network = test_config["client"].vim_action("delete", "networks", uuid=self.__class__.vim_network_uuid)
+        logger.debug("{}".format(network))
+        assert ('deleted' in network.get('result', ""))
+
+
+class test_VIM_image_operations(test_base):
+    """Smoke test: images can be listed through the RO 'vim_action' passthrough."""
+
+    def test_000_list_VIM_images(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
+                                                           inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+        # Only checks the call succeeds; the returned list content is not asserted
+        images = test_config["client"].vim_action("list", "images")
+        logger.debug("{}".format(images))
+
+'''
+The following is a non critical test that will fail most of the times.
+In case of OpenStack datacenter these tests will only success if RO has access to the admin endpoint
+This test will only be executed in case it is specifically requested by the user
+'''
+class test_VIM_tenant_operations(test_base):
+    """CRUD tests for tenants directly on the VIM (needs admin endpoint on OpenStack)."""
+    vim_tenant_name = None   # random name created by test_000
+    vim_tenant_uuid = None   # uuid returned by the VIM on creation
+
+    @classmethod
+    def setUpClass(cls):
+        # Chain to the shared logging of test_base, then warn about the precondition
+        test_base.setUpClass(cls)
+        logger.warning("In case of OpenStack datacenter these tests will only success "
+                       "if RO has access to the admin endpoint")
+
+    def test_000_create_VIM_tenant(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
+                                                           inspect.currentframe().f_code.co_name)
+        self.__class__.vim_tenant_name = _get_random_string(20)
+        self.__class__.test_index += 1
+        tenant = test_config["client"].vim_action("create", "tenants", name=self.__class__.vim_tenant_name)
+        logger.debug("{}".format(tenant))
+        # Remember the uuid so the show/delete tests can address this tenant
+        self.__class__.vim_tenant_uuid = tenant["tenant"]["id"]
+        self.assertEqual(tenant.get('tenant', {}).get('name', ''), self.__class__.vim_tenant_name)
+
+    def test_010_list_VIM_tenants(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
+                                                           inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+        # Only checks the call succeeds; the returned list content is not asserted
+        tenants = test_config["client"].vim_action("list", "tenants")
+        logger.debug("{}".format(tenants))
+
+    def test_020_get_VIM_tenant_by_uuid(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
+                                                           inspect.currentframe().f_code.co_name)
+
+        self.__class__.test_index += 1
+        tenant = test_config["client"].vim_action("show", "tenants", uuid=self.__class__.vim_tenant_uuid)
+        logger.debug("{}".format(tenant))
+        self.assertEqual(tenant.get('tenant', {}).get('name', ''), self.__class__.vim_tenant_name)
+
+    def test_030_delete_VIM_tenant_by_uuid(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
+                                                           inspect.currentframe().f_code.co_name)
+
+        self.__class__.test_index += 1
+        tenant = test_config["client"].vim_action("delete", "tenants", uuid=self.__class__.vim_tenant_uuid)
+        logger.debug("{}".format(tenant))
+        assert ('deleted' in tenant.get('result', ""))
+
+
+class test_vimconn_connect(test_base):
+    """Smoke test: the VIM connector can establish a working connection."""
+
+    def test_000_connect(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+
+        self.__class__.test_index += 1
+        if test_config['vimtype'] == 'vmware':
+            # vCloud Director connector exposes an explicit connect() returning a client object
+            vca_object = test_config["vim_conn"].connect()
+            logger.debug("{}".format(vca_object))
+            self.assertIsNotNone(vca_object)
+        elif test_config['vimtype'] == 'openstack':
+            # OpenStack connector: force a reconnection, then issue a harmless list query
+            test_config["vim_conn"]._reload_connection()
+            network_list = test_config["vim_conn"].get_network_list()
+            logger.debug("{}".format(network_list))
+            self.assertIsNotNone(network_list)
+
+class test_vimconn_new_network(test_base):
+    """Tests of the VIM connector new_network/delete_network/refresh_nets_status API."""
+    network_name = None  # random network name regenerated by most test methods
+
+    def test_000_new_network(self):
+        # Create a bridge network, verify it is listed with the right type, then delete it
+        self.__class__.network_name = _get_random_string(20)
+        network_type = 'bridge'
+
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                     self.__class__.test_index, inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        network, _ = test_config["vim_conn"].new_network(net_name=self.__class__.network_name,
+                                                          net_type=network_type)
+        self.__class__.network_id = network
+        logger.debug("{}".format(network))
+
+        network_list = test_config["vim_conn"].get_network_list()
+        for net in network_list:
+            if self.__class__.network_name in net.get('name'):
+                self.assertIn(self.__class__.network_name, net.get('name'))
+                self.assertEqual(net.get('type'), network_type)
+
+        # Deleting created network
+        result = test_config["vim_conn"].delete_network(self.__class__.network_id)
+        if result:
+            logger.info("Network id {} sucessfully deleted".format(self.__class__.network_id))
+        else:
+            logger.info("Failed to delete network id {}".format(self.__class__.network_id))
+
+    def test_010_new_network_by_types(self):
+        # Create one network per supported type and verify the type is reported back
+        delete_net_ids = []
+        network_types = ['data','bridge','mgmt']
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+        for net_type in network_types:
+            self.__class__.network_name = _get_random_string(20)
+            network_id, _ = test_config["vim_conn"].new_network(net_name=self.__class__.network_name,
+                                                                                net_type=net_type)
+
+            delete_net_ids.append(network_id)
+            logger.debug("{}".format(network_id))
+
+            network_list = test_config["vim_conn"].get_network_list()
+            for net in network_list:
+                if self.__class__.network_name in net.get('name'):
+                    self.assertIn(self.__class__.network_name, net.get('name'))
+                if net_type in net.get('type'):
+                    self.assertEqual(net.get('type'), net_type)
+                else:
+                    self.assertNotEqual(net.get('type'), net_type)
+
+        # Deleting created network
+        for net_id in delete_net_ids:
+            result = test_config["vim_conn"].delete_network(net_id)
+            if result:
+                logger.info("Network id {} sucessfully deleted".format(net_id))
+            else:
+                logger.info("Failed to delete network id {}".format(net_id))
+
+    def test_020_new_network_by_ipprofile(self):
+        # Build an ip_profile from the first vnfd_*.yaml of the 'simple_multi_vnfc'
+        # scenario fixtures and create a mgmt network with it.
+        # NOTE(review): 'vnfd_files' (and version/dhcp_*/subnet_address below) stay
+        # unbound if the directory or an 'ip-profile' entry is missing — the test
+        # would then fail with NameError rather than a clean assertion; confirm the
+        # fixture layout is guaranteed by the test setup.
+        test_directory_content = os.listdir(test_config["test_directory"])
+
+        for dir_name in test_directory_content:
+            if dir_name == 'simple_multi_vnfc':
+                self.__class__.scenario_test_path = test_config["test_directory"] + '/'+ dir_name
+                vnfd_files = glob.glob(self.__class__.scenario_test_path+'/vnfd_*.yaml')
+                break
+
+        for vnfd in vnfd_files:
+            with open(vnfd, 'r') as stream:
+                # NOTE(review): yaml.Loader can construct arbitrary Python objects;
+                # acceptable for trusted local fixtures, prefer yaml.safe_load otherwise
+                vnf_descriptor = yaml.load(stream, Loader=yaml.Loader)
+
+            internal_connections_list = vnf_descriptor['vnf']['internal-connections']
+            for item in internal_connections_list:
+                if 'ip-profile' in item:
+                    version = item['ip-profile']['ip-version']
+                    dhcp_count = item['ip-profile']['dhcp']['count']
+                    dhcp_enabled = item['ip-profile']['dhcp']['enabled']
+                    dhcp_start_address = item['ip-profile']['dhcp']['start-address']
+                    subnet_address = item['ip-profile']['subnet-address']
+
+
+        self.__class__.network_name = _get_random_string(20)
+        ip_profile = {'dhcp_count': dhcp_count,
+                      'dhcp_enabled': dhcp_enabled,
+                      'dhcp_start_address': dhcp_start_address,
+                      'ip_version': version,
+                      'subnet_address': subnet_address
+                     }
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+        network, _ = test_config["vim_conn"].new_network(net_name=self.__class__.network_name,
+                                                                           net_type='mgmt',
+                                                                     ip_profile=ip_profile)
+        self.__class__.network_id = network
+        logger.debug("{}".format(network))
+
+        network_list = test_config["vim_conn"].get_network_list()
+        for net in network_list:
+            if self.__class__.network_name in net.get('name'):
+                self.assertIn(self.__class__.network_name, net.get('name'))
+
+        # Deleting created network
+        result = test_config["vim_conn"].delete_network(self.__class__.network_id)
+        if result:
+            logger.info("Network id {} sucessfully deleted".format(self.__class__.network_id))
+        else:
+            logger.info("Failed to delete network id {}".format(self.__class__.network_id))
+
+    def test_030_new_network_by_isshared(self):
+        # Create a shared bridge network and verify the 'shared' flag is reported back
+        self.__class__.network_name = _get_random_string(20)
+        shared = True
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+        network, _ = test_config["vim_conn"].new_network(net_name=self.__class__.network_name,
+                                                                         net_type='bridge',
+                                                                             shared=shared)
+        self.__class__.network_id = network
+        logger.debug("{}".format(network))
+
+        network_list = test_config["vim_conn"].get_network_list()
+        for net in network_list:
+            if self.__class__.network_name in net.get('name'):
+                self.assertIn(self.__class__.network_name, net.get('name'))
+                self.assertEqual(net.get('shared'), shared)
+
+        # Deleting created network
+        result = test_config["vim_conn"].delete_network(self.__class__.network_id)
+        if result:
+            logger.info("Network id {} sucessfully deleted".format(self.__class__.network_id))
+        else:
+            logger.info("Failed to delete network id {}".format(self.__class__.network_id))
+
+    def test_040_new_network_by_negative(self):
+        # Negative case: pass an unknown net_type; only listing presence is checked
+        # NOTE(review): no assertion that the connector rejects the type — confirm
+        # whether an exception is the expected behavior here.
+        self.__class__.network_name = _get_random_string(20)
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+        network, _ = test_config["vim_conn"].new_network(net_name=self.__class__.network_name,
+                                                                    net_type='unknowntype')
+        self.__class__.network_id = network
+        logger.debug("{}".format(network))
+        network_list = test_config["vim_conn"].get_network_list()
+        for net in network_list:
+            if self.__class__.network_name in net.get('name'):
+                self.assertIn(self.__class__.network_name, net.get('name'))
+
+        # Deleting created network
+        result = test_config["vim_conn"].delete_network(self.__class__.network_id)
+        if result:
+            logger.info("Network id {} sucessfully deleted".format(self.__class__.network_id))
+        else:
+            logger.info("Failed to delete network id {}".format(self.__class__.network_id))
+
+    def test_050_refresh_nets_status(self):
+        # A freshly created network must be reported ACTIVE by refresh_nets_status
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+        # creating new network
+        network_name = _get_random_string(20)
+        net_type = 'bridge'
+        network_id, _ = test_config["vim_conn"].new_network(net_name=network_name,
+                                                          net_type=net_type)
+        # refresh net status
+        net_dict = test_config["vim_conn"].refresh_nets_status([network_id])
+        for attr in net_dict[network_id]:
+            if attr == 'status':
+                self.assertEqual(net_dict[network_id][attr], 'ACTIVE')
+
+        # Deleting created network
+        result = test_config["vim_conn"].delete_network(network_id)
+        if result:
+            logger.info("Network id {} sucessfully deleted".format(network_id))
+        else:
+            logger.info("Failed to delete network id {}".format(network_id))
+
+    def test_060_refresh_nets_status_negative(self):
+        # Refreshing a random, non-existent network id must report it as DELETED
+        unknown_net_id = str(uuid.uuid4())
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        # refresh net status
+        net_dict = test_config["vim_conn"].refresh_nets_status([unknown_net_id])
+        if test_config['vimtype'] == 'openstack':
+            self.assertEqual(net_dict[unknown_net_id]['status'], 'DELETED')
+        else:
+            # TODO : Fix vmware connector to return status DELETED as per vimconn.py
+            self.assertEqual(net_dict, {})
+
class test_vimconn_get_network_list(test_base):
    """Exercise vimconn get_network_list() with its supported filters.

    setUp creates a fresh bridge network used by every test; tearDown deletes it.
    """
    network_name = None

    def setUp(self):
        # creating new network
        self.__class__.network_name = _get_random_string(20)
        self.__class__.net_type = 'bridge'
        network, _ = test_config["vim_conn"].new_network(net_name=self.__class__.network_name,
                                                         net_type=self.__class__.net_type)
        self.__class__.network_id = network
        logger.debug("{}".format(network))

    def tearDown(self):
        test_base.tearDown(self)

        # Deleting created network
        result = test_config["vim_conn"].delete_network(self.__class__.network_id)
        if result:
            logger.info("Network id {} successfully deleted".format(self.__class__.network_id))
        else:
            logger.info("Failed to delete network id {}".format(self.__class__.network_id))

    def test_000_get_network_list(self):
        """Unfiltered listing must contain the created network with the expected attributes."""
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        network_list = test_config["vim_conn"].get_network_list()
        for net in network_list:
            if self.__class__.network_name in net.get('name'):
                self.assertIn(self.__class__.network_name, net.get('name'))
                self.assertEqual(net.get('type'), self.__class__.net_type)
                self.assertEqual(net.get('status'), 'ACTIVE')
                self.assertEqual(net.get('shared'), False)

    def test_010_get_network_list_by_name(self):
        """Filter the listing by network name."""
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        if test_config['vimtype'] == 'openstack':
            network_name = test_config['vim_conn'].get_network(self.__class__.network_id)['name']
        else:
            network_name = test_config['vim_conn'].get_network_name_by_id(self.__class__.network_id)

        # find network from list by its name
        new_network_list = test_config["vim_conn"].get_network_list({'name': network_name})
        for list_item in new_network_list:
            if self.__class__.network_name in list_item.get('name'):
                self.assertEqual(network_name, list_item.get('name'))
                self.assertEqual(list_item.get('type'), self.__class__.net_type)
                self.assertEqual(list_item.get('status'), 'ACTIVE')

    def test_020_get_network_list_by_id(self):
        """Filter the listing by network id."""
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        # find network from list by its id
        new_network_list = test_config["vim_conn"].get_network_list({'id': self.__class__.network_id})
        for list_item in new_network_list:
            if self.__class__.network_id in list_item.get('id'):
                self.assertEqual(self.__class__.network_id, list_item.get('id'))
                self.assertEqual(list_item.get('type'), self.__class__.net_type)
                self.assertEqual(list_item.get('status'), 'ACTIVE')

    def test_030_get_network_list_by_shared(self):
        """Filter the listing by the 'shared' flag (combined with name)."""
        Shared = False
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        if test_config['vimtype'] == 'openstack':
            network_name = test_config['vim_conn'].get_network(self.__class__.network_id)['name']
        else:
            network_name = test_config['vim_conn'].get_network_name_by_id(self.__class__.network_id)
        # find network from list by its shared value
        new_network_list = test_config["vim_conn"].get_network_list({'shared': Shared,
                                                                     'name': network_name})
        for list_item in new_network_list:
            if list_item.get('shared') == Shared:
                self.assertEqual(list_item.get('shared'), Shared)
                self.assertEqual(list_item.get('type'), self.__class__.net_type)
                self.assertEqual(network_name, list_item.get('name'))

    def test_040_get_network_list_by_tenant_id(self):
        """Filter the listing by tenant id (combined with name)."""
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        tenant_list = test_config["vim_conn"].get_tenant_list()
        if test_config['vimtype'] == 'openstack':
            network_name = test_config['vim_conn'].get_network(self.__class__.network_id)['name']
        else:
            network_name = test_config['vim_conn'].get_network_name_by_id(self.__class__.network_id)

        for tenant_item in tenant_list:
            if test_config['tenant'] == tenant_item.get('name'):
                # find network from list by its tenant id
                tenant_id = tenant_item.get('id')
                new_network_list = test_config["vim_conn"].get_network_list({'tenant_id': tenant_id,
                                                                             'name': network_name})
                for list_item in new_network_list:
                    self.assertEqual(tenant_id, list_item.get('tenant_id'))
                    self.assertEqual(network_name, list_item.get('name'))
                    self.assertEqual(list_item.get('type'), self.__class__.net_type)
                    self.assertEqual(list_item.get('status'), 'ACTIVE')

    def test_050_get_network_list_by_status(self):
        """Filter the listing by status (combined with name)."""
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1
        status = 'ACTIVE'

        if test_config['vimtype'] == 'openstack':
            network_name = test_config['vim_conn'].get_network(self.__class__.network_id)['name']
        else:
            network_name = test_config['vim_conn'].get_network_name_by_id(self.__class__.network_id)

        # find network from list by its status
        new_network_list = test_config["vim_conn"].get_network_list({'status': status,
                                                                     'name': network_name})
        for list_item in new_network_list:
            self.assertIn(self.__class__.network_name, list_item.get('name'))
            self.assertEqual(list_item.get('type'), self.__class__.net_type)
            self.assertEqual(list_item.get('status'), status)

    def test_060_get_network_list_by_negative(self):
        """Filtering by a name that does not exist must return an empty list."""
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        network_list = test_config["vim_conn"].get_network_list({'name': 'unknown_name'})
        self.assertEqual(network_list, [])
+
class test_vimconn_get_network(test_base):
    """Tests for vimconn get_network() by id, positive and negative.

    setUp creates a fresh bridge network; tearDown deletes it.
    """
    network_name = None

    def setUp(self):
        # creating new network
        self.__class__.network_name = _get_random_string(20)
        self.__class__.net_type = 'bridge'
        network, _ = test_config["vim_conn"].new_network(net_name=self.__class__.network_name,
                                                         net_type=self.__class__.net_type)
        self.__class__.network_id = network
        logger.debug("{}".format(network))

    def tearDown(self):
        test_base.tearDown(self)

        # Deleting created network
        result = test_config["vim_conn"].delete_network(self.__class__.network_id)
        if result:
            logger.info("Network id {} successfully deleted".format(self.__class__.network_id))
        else:
            logger.info("Failed to delete network id {}".format(self.__class__.network_id))

    def test_000_get_network(self):
        """get_network() must return the created network with matching attributes."""
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        network_info = test_config["vim_conn"].get_network(self.__class__.network_id)
        self.assertEqual(network_info.get('status'), 'ACTIVE')
        self.assertIn(self.__class__.network_name, network_info.get('name'))
        self.assertEqual(network_info.get('type'), self.__class__.net_type)
        self.assertEqual(network_info.get('id'), self.__class__.network_id)

    def test_010_get_network_negative(self):
        """get_network() with a non-existent id must raise with http_code 404."""
        Non_exist_id = str(uuid.uuid4())
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1
        with self.assertRaises(Exception) as context:
            test_config["vim_conn"].get_network(Non_exist_id)

        self.assertEqual((context.exception).http_code, 404)
+
class test_vimconn_delete_network(test_base):
    """Tests for vimconn delete_network(), positive and negative."""
    network_name = None

    def test_000_delete_network(self):
        """Create a network, delete it, and verify it is gone from the listing."""
        # Creating network
        self.__class__.network_name = _get_random_string(20)
        self.__class__.net_type = 'bridge'
        network, _ = test_config["vim_conn"].new_network(net_name=self.__class__.network_name,
                                                         net_type=self.__class__.net_type)
        self.__class__.network_id = network
        logger.debug("{}".format(network))

        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        result = test_config["vim_conn"].delete_network(self.__class__.network_id)
        if result:
            logger.info("Network id {} successfully deleted".format(self.__class__.network_id))
        else:
            logger.info("Failed to delete network id {}".format(self.__class__.network_id))
        # give the VIM some time to actually remove the network before re-listing
        time.sleep(5)
        # after deleting network we check in network list
        network_list = test_config["vim_conn"].get_network_list({'id': self.__class__.network_id})
        self.assertEqual(network_list, [])

    def test_010_delete_network_negative(self):
        """delete_network() with a non-existent id must raise with http_code 404."""
        Non_exist_id = str(uuid.uuid4())

        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        with self.assertRaises(Exception) as context:
            test_config["vim_conn"].delete_network(Non_exist_id)

        self.assertEqual((context.exception).http_code, 404)
+
class test_vimconn_get_flavor(test_base):
    """Tests for vimconn get_flavor(), positive and negative."""

    def test_000_get_flavor(self):
        """Create a flavor from the simple_linux VNFD sizing and read it back by id."""
        test_directory_content = os.listdir(test_config["test_directory"])

        # guard: without this init a missing 'simple_linux' directory raised a
        # confusing NameError instead of an empty iteration below
        vnfd_files = []
        for dir_name in test_directory_content:
            if dir_name == 'simple_linux':
                self.__class__.scenario_test_path = test_config["test_directory"] + '/' + dir_name
                vnfd_files = glob.glob(self.__class__.scenario_test_path + '/vnfd_*.yaml')
                break

        for vnfd in vnfd_files:
            with open(vnfd, 'r') as stream:
                vnf_descriptor = yaml.load(stream, Loader=yaml.Loader)

            vnfc_list = vnf_descriptor['vnf']['VNFC']
            for item in vnfc_list:
                if 'ram' in item and 'vcpus' in item and 'disk' in item:
                    ram = item['ram']
                    vcpus = item['vcpus']
                    disk = item['disk']

        # NOTE(review): ram/vcpus/disk are only bound if some VNFC carried all
        # three keys; otherwise the next statement raises NameError — TODO confirm
        # the test data always provides them
        flavor_data = {
                      'name': _get_random_string(20),
                      'ram': ram,
                      'vcpus': vcpus,
                      'disk': disk
                    }

        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1
        # create new flavor
        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
        # get flavor by id
        result = test_config["vim_conn"].get_flavor(flavor_id)
        self.assertEqual(ram, result['ram'])
        self.assertEqual(vcpus, result['vcpus'])
        self.assertEqual(disk, result['disk'])

        # delete flavor
        result = test_config["vim_conn"].delete_flavor(flavor_id)
        if result:
            logger.info("Flavor id {} successfully deleted".format(result))
        else:
            logger.info("Failed to delete flavor id {}".format(result))

    def test_010_get_flavor_negative(self):
        """get_flavor() with a non-existent id must raise with http_code 404."""
        Non_exist_flavor_id = str(uuid.uuid4())

        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        with self.assertRaises(Exception) as context:
            test_config["vim_conn"].get_flavor(Non_exist_flavor_id)

        self.assertEqual((context.exception).http_code, 404)
+
class test_vimconn_new_flavor(test_base):
    """Tests for vimconn new_flavor()/delete_flavor(), positive and negative."""
    flavor_id = None

    def test_000_new_flavor(self):
        """Create a flavor and check a valid uuid string is returned."""
        # fixed key typo: was 'vpcus', so the requested vCPU count was silently ignored
        flavor_data = {'name': _get_random_string(20), 'ram': 1024, 'vcpus': 1, 'disk': 10}

        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        # create new flavor
        self.__class__.flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
        # python3: ids are plain str (former py2 "(str, unicode)" check)
        self.assertIsInstance(self.__class__.flavor_id, str)
        self.assertIsInstance(uuid.UUID(self.__class__.flavor_id), uuid.UUID)

    def test_010_delete_flavor(self):
        """Delete the flavor created by test_000."""
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        # delete flavor
        result = test_config["vim_conn"].delete_flavor(self.__class__.flavor_id)
        if result:
            logger.info("Flavor id {} successfully deleted".format(result))
        else:
            logger.error("Failed to delete flavor id {}".format(result))
            raise Exception ("Failed to delete created flavor")

    def test_020_new_flavor_negative(self):
        """new_flavor() with invalid data must raise with http_code 400."""
        Invalid_flavor_data = {'ram': '1024', 'vcpus': 2.0, 'disk': 2.0}

        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        with self.assertRaises(Exception) as context:
            test_config["vim_conn"].new_flavor(Invalid_flavor_data)

        self.assertEqual((context.exception).http_code, 400)

    def test_030_delete_flavor_negative(self):
        """delete_flavor() with a non-existent id must raise with http_code 404."""
        Non_exist_flavor_id = str(uuid.uuid4())

        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        with self.assertRaises(Exception) as context:
            test_config["vim_conn"].delete_flavor(Non_exist_flavor_id)

        self.assertEqual((context.exception).http_code, 404)
+
+# class test_vimconn_new_image(test_base):
+#
+#     def test_000_new_image(self):
+#         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+#                                                             self.__class__.test_index,
+#                                                 inspect.currentframe().f_code.co_name)
+#         self.__class__.test_index += 1
+#
+#         image_path = test_config['image_path']
+#         if image_path:
+#             self.__class__.image_id = test_config["vim_conn"].new_image({ 'name': 'TestImage', 'location' : image_path, 'metadata': {'upload_location':None} })
+#             time.sleep(20)
+#
+#             self.assertIsInstance(self.__class__.image_id, (str, unicode))
+#             self.assertIsInstance(uuid.UUID(self.__class__.image_id), uuid.UUID)
+#         else:
+#             self.skipTest("Skipping test as image file not present at RO container")
+#
+#     def test_010_new_image_negative(self):
+#         Non_exist_image_path = '/temp1/cirros.ovf'
+#
+#         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+#                                                             self.__class__.test_index,
+#                                                 inspect.currentframe().f_code.co_name)
+#         self.__class__.test_index += 1
+#
+#         with self.assertRaises(Exception) as context:
+#             test_config["vim_conn"].new_image({ 'name': 'TestImage', 'location' : Non_exist_image_path})
+#
+#         self.assertEqual((context.exception).http_code, 400)
+#
+#     def test_020_delete_image(self):
+#         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+#                                                             self.__class__.test_index,
+#                                                 inspect.currentframe().f_code.co_name)
+#         self.__class__.test_index += 1
+#
+#         image_id = test_config["vim_conn"].delete_image(self.__class__.image_id)
+#
+#         self.assertIsInstance(image_id, (str, unicode))
+#
+#     def test_030_delete_image_negative(self):
+#         Non_exist_image_id = str(uuid.uuid4())
+#
+#         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+#                                                             self.__class__.test_index,
+#                                                 inspect.currentframe().f_code.co_name)
+#         self.__class__.test_index += 1
+#
+#         with self.assertRaises(Exception) as context:
+#             test_config["vim_conn"].delete_image(Non_exist_image_id)
+#
+#         self.assertEqual((context.exception).http_code, 404)
+
+# class test_vimconn_get_image_id_from_path(test_base):
+#
+#     def test_000_get_image_id_from_path(self):
+#         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+#                                                             self.__class__.test_index,
+#                                                 inspect.currentframe().f_code.co_name)
+#         self.__class__.test_index += 1
+#
+#         image_path = test_config['image_path']
+#         if image_path:
+#             image_id = test_config["vim_conn"].get_image_id_from_path( image_path )
+#             self.assertEqual(type(image_id),str)
+#         else:
+#             self.skipTest("Skipping test as image file not present at RO container")
+#
+#     def test_010_get_image_id_from_path_negative(self):
+#         Non_exist_image_path = '/temp1/cirros.ovf'
+#
+#         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+#                                                             self.__class__.test_index,
+#                                                 inspect.currentframe().f_code.co_name)
+#         self.__class__.test_index += 1
+#
+#         with self.assertRaises(Exception) as context:
+#             test_config["vim_conn"].new_image({ 'name': 'TestImage', 'location' : Non_exist_image_path })
+#
+#         self.assertEqual((context.exception).http_code, 400)
+
class test_vimconn_get_image_list(test_base):
    """Tests for vimconn get_image_list() with name/id filters and a negative case."""
    image_name = None
    image_id = None

    def test_000_get_image_list(self):
        """Unfiltered listing; remember one image name/id for the filter tests."""
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1
        image_list = test_config["vim_conn"].get_image_list()

        for item in image_list:
            if 'name' in item:
                self.__class__.image_name = item['name']
                self.__class__.image_id = item['id']
                # python3: ids/names are plain str (former py2 "(str, unicode)" checks)
                self.assertIsInstance(self.__class__.image_name, str)
                self.assertIsInstance(self.__class__.image_id, str)

    def test_010_get_image_list_by_name(self):
        """Filter the listing by the remembered image name."""
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        image_list = test_config["vim_conn"].get_image_list({'name': self.__class__.image_name})

        for item in image_list:
            self.assertIsInstance(item['id'], str)
            self.assertIsInstance(item['name'], str)
            self.assertEqual(item['id'], self.__class__.image_id)
            self.assertEqual(item['name'], self.__class__.image_name)

    def test_020_get_image_list_by_id(self):
        """Filter the listing by the remembered image id."""
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        filter_image_list = test_config["vim_conn"].get_image_list({'id': self.__class__.image_id})

        for item1 in filter_image_list:
            self.assertIsInstance(item1['id'], str)
            self.assertIsInstance(item1['name'], str)
            self.assertEqual(item1['id'], self.__class__.image_id)
            self.assertEqual(item1['name'], self.__class__.image_name)

    def test_030_get_image_list_negative(self):
        """Filtering by an unknown name/id must return an empty list."""
        Non_exist_image_id = uuid.uuid4()
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1
        image_list = test_config["vim_conn"].get_image_list({'name': 'Unknown_name', 'id': Non_exist_image_id})

        # was assertIsNotNone(image_list, None): the second positional arg is the
        # assertion *message*, not a comparison value
        self.assertIsNotNone(image_list)
        self.assertEqual(image_list, [])
+
+class test_vimconn_new_vminstance(test_base):
+    network_name = None
+    net_type = None
+    network_id = None
+    image_id = None
+    instance_id = None
+
+    def setUp(self):
+        # create network
+        self.__class__.network_name = _get_random_string(20)
+        self.__class__.net_type = 'bridge'
+
+        self.__class__.network_id, _ = test_config["vim_conn"].new_network(net_name=self.__class__.network_name,
+                                                                            net_type=self.__class__.net_type)
+        # find image name and image id
+        if test_config['image_name']:
+            image_list = test_config['vim_conn'].get_image_list({'name': test_config['image_name']})
+            if len(image_list) == 0:
+                raise Exception("Image {} is not found at VIM".format(test_config['image_name']))
+            else:
+                self.__class__.image_id = image_list[0]['id']
+        else:
+            image_list = test_config['vim_conn'].get_image_list()
+            if len(image_list) == 0:
+                raise Exception("Not found any image at VIM")
+            else:
+                self.__class__.image_id = image_list[0]['id']
+
+    def tearDown(self):
+        test_base.tearDown(self)
+        # Deleting created network
+        result = test_config["vim_conn"].delete_network(self.__class__.network_id)
+        if result:
+            logger.info("Network id {} sucessfully deleted".format(self.__class__.network_id))
+        else:
+            logger.info("Failed to delete network id {}".format(self.__class__.network_id))
+
+    def test_000_new_vminstance(self):
+        vpci = "0000:00:11.0"
+        name = "eth0"
+
+        flavor_data = {'name': _get_random_string(20), 'ram': 1024, 'vcpus': 1, 'disk': 10}
+
+        # create new flavor
+        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+
+
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'vpci': vpci, 'port_security': True, 'type': 'virtual', 'net_id': self.__class__.network_id}]
+
+        self.__class__.instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False, image_id=self.__class__.image_id, flavor_id=flavor_id, net_list=net_list)
+
+        self.assertIsInstance(self.__class__.instance_id, (str, unicode))
+
+    def test_010_new_vminstance_by_model(self):
+        flavor_data = {'name': _get_random_string(20),'ram': 1024, 'vcpus': 2, 'disk': 10}
+        model_name = 'e1000'
+        name = 'eth0'
+
+        # create new flavor
+        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'port_security': True, 'model': model_name, 'type': 'virtual', 'net_id': self.__class__.network_id}]
+
+        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False, image_id=self.__class__.image_id,flavor_id=flavor_id,net_list=net_list)
+
+        self.assertIsInstance(instance_id, (str, unicode))
+
+        # Deleting created vm instance
+        logger.info("Deleting created vm intance")
+        test_config["vim_conn"].delete_vminstance(instance_id)
+        time.sleep(10)
+
+    def test_020_new_vminstance_by_net_use(self):
+        flavor_data = {'name': _get_random_string(20),'ram': 1024, 'vcpus': 2, 'disk': 10}
+        net_use = 'data'
+        name = 'eth0'
+
+        # create new flavor
+        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        net_list = [{'use': net_use, 'name': name, 'floating_ip': False, 'port_security': True, 'type': 'virtual', 'net_id': self.__class__.network_id}]
+
+        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False, image_id=self.__class__.image_id,disk_list=None,
+                                                                                           flavor_id=flavor_id,
+                                                                                             net_list=net_list)
+        self.assertIsInstance(instance_id, (str, unicode))
+
+        # Deleting created vm instance
+        logger.info("Deleting created vm intance")
+        test_config["vim_conn"].delete_vminstance(instance_id)
+        time.sleep(10)
+
+    def test_030_new_vminstance_by_net_type(self):
+        flavor_data = {'name':_get_random_string(20),'ram': 1024, 'vcpus': 2, 'disk': 10}
+        _type = 'VF'
+        name = 'eth0'
+
+        # create new flavor
+        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        if test_config['vimtype'] == 'vmware':
+            net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'port_security': True,
+                         'type': _type, 'net_id': self.__class__.network_id}]
+
+            instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=self.__class__.image_id,
+                                                                    flavor_id=flavor_id,
+                                                                    net_list=net_list)
+            self.assertEqual(type(instance_id),str)
+
+        if test_config['vimtype'] == 'openstack':
+            # create network of type data
+            network_name = _get_random_string(20)
+            net_type = 'data'
+
+            network_id, _ = test_config["vim_conn"].new_network(net_name=network_name,
+                                                                            net_type=net_type)
+            net_list = [{'use': net_type, 'name': name, 'floating_ip': False, 'port_security': True,
+                         'type': _type, 'net_id': network_id}]
+
+            instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False,
+                                                                    image_id=self.__class__.image_id, disk_list=None,
+                                                                    flavor_id=flavor_id,
+                                                                    net_list=net_list)
+
+            self.assertEqual(type(instance_id), unicode)
+
+            # delete created network
+            result = test_config["vim_conn"].delete_network(network_id)
+            if result:
+                logger.info("Network id {} sucessfully deleted".format(network_id))
+            else:
+                logger.info("Failed to delete network id {}".format(network_id))
+
+        # Deleting created vm instance
+        logger.info("Deleting created vm intance")
+        test_config["vim_conn"].delete_vminstance(instance_id)
+        time.sleep(10)
+
+    def test_040_new_vminstance_by_cloud_config(self):
+        flavor_data = {'name': _get_random_string(20),'ram': 1024, 'vcpus': 2, 'disk': 10}
+        name = 'eth0'
+        user_name = 'test_user'
+
+        key_pairs = ['ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCy2w9GHMKKNkpCmrDK2ovc3XBYDETuLWwaW24S+feHhLBQiZlzh3gSQoINlA+2ycM9zYbxl4BGzEzpTVyCQFZv5PidG4m6ox7LR+KYkDcITMyjsVuQJKDvt6oZvRt6KbChcCi0n2JJD/oUiJbBFagDBlRslbaFI2mmqmhLlJ5TLDtmYxzBLpjuX4m4tv+pdmQVfg7DYHsoy0hllhjtcDlt1nn05WgWYRTu7mfQTWfVTavu+OjIX3e0WN6NW7yIBWZcE/Q9lC0II3W7PZDE3QaT55se4SPIO2JTdqsx6XGbekdG1n6adlduOI27sOU5m4doiyJ8554yVbuDB/z5lRBD alfonso.tiernosepulveda@telefonica.com']
+
+        users_data = [{'key-pairs': ['ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCy2w9GHMKKNkpCmrDK2ovc3XBYDETuLWwaW24S+feHhLBQiZlzh3gSQoINlA+2ycM9zYbxl4BGzEzpTVyCQFZv5PidG4m6ox7LR+KYkDcITMyjsVuQJKDvt6oZvRt6KbChcCi0n2JJD/oUiJbBFagDBlRslbaFI2mmqmhLlJ5TLDtmYxzBLpjuX4m4tv+pdmQVfg7DYHsoy0hllhjtcDlt1nn05WgWYRTu7mfQTWfVTavu+OjIX3e0WN6NW7yIBWZcE/Q9lC0II3W7PZDE3QaT55se4SPIO2JTdqsx6XGbekdG1n6adlduOI27sOU5m4doiyJ8554yVbuDB/z5lRBD alfonso.tiernosepulveda@telefonica.com'], 'name': user_name}]
+
+        cloud_data = {'config-files': [{'content': 'auto enp0s3\niface enp0s3 inet dhcp\n', 'dest': '/etc/network/interfaces.d/enp0s3.cfg', 'owner': 'root:root', 'permissions': '0644'}, {'content': '#! /bin/bash\nls -al >> /var/log/osm.log\n', 'dest': '/etc/rc.local', 'permissions': '0755'}, {'content': 'file content', 'dest': '/etc/test_delete'}], 'boot-data-drive': True, 'key-pairs': key_pairs, 'users': users_data }
+
+        # create new flavor
+        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'port_security': True, 'type': 'virtual', 'net_id': self.__class__.network_id}]
+
+        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Cloud_vm', description='', start=False,
+                                                                image_id=self.__class__.image_id, flavor_id=flavor_id,net_list=net_list,cloud_config=cloud_data)
+
+        self.assertIsInstance(instance_id, (str, unicode))
+
+        # Deleting created vm instance
+        logger.info("Deleting created vm intance")
+        test_config["vim_conn"].delete_vminstance(instance_id)
+        time.sleep(10)
+
+    def test_050_new_vminstance_by_disk_list(self):
+        flavor_data = {'name':_get_random_string(20),'ram': 1024, 'vcpus': 2, 'disk': 10}
+        name = 'eth0'
+
+        device_data = [{'image_id': self.__class__.image_id, 'size': '10'}]
+
+        # create new flavor
+        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'port_security': True, 'type': 'virtual', 'net_id': self.__class__.network_id}]
+
+        instance_id, _ = test_config["vim_conn"].new_vminstance(name='VM_test1', description='', start=False, image_id=self.__class__.image_id,
+                                                                                           flavor_id=flavor_id,
+                                                                                             net_list=net_list,
+                                                                                         disk_list=device_data)
+
+        self.assertIsInstance(instance_id, (str, unicode))
+        # Deleting created vm instance
+        logger.info("Deleting created vm intance")
+        test_config["vim_conn"].delete_vminstance(instance_id)
+        time.sleep(10)
+
+    def test_060_new_vminstance_negative(self):
+        unknown_flavor_id = str(uuid.uuid4())
+        unknown_image_id = str(uuid.uuid4())
+        name = 'eth2'
+
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'port_security': True, 'type': 'virtual', 'net_id': self.__class__.network_id}]
+
+        with self.assertRaises(Exception) as context:
+            test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False, image_id=unknown_image_id,
+                                                                  flavor_id=unknown_flavor_id,
+                                                                            net_list=net_list)
+
+        self.assertIn((context.exception).http_code, (400, 404))
+
+
+    def test_070_get_vminstance(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        # Get instance by its id
+        vm_info = test_config["vim_conn"].get_vminstance(self.__class__.instance_id)
+
+        if test_config['vimtype'] == 'vmware':
+            for attr in vm_info:
+                if attr == 'status':
+                    self.assertEqual(vm_info[attr], 'ACTIVE')
+                if attr == 'hostId':
+                    self.assertEqual(type(vm_info[attr]), str)
+                if attr == 'interfaces':
+                    self.assertEqual(type(vm_info[attr]), list)
+                    self.assertEqual(vm_info[attr][0]['IsConnected'], 'true')
+                if attr == 'IsEnabled':
+                    self.assertEqual(vm_info[attr], 'true')
+
+    def test_080_get_vminstance_negative(self):
+        unknown_instance_id = str(uuid.uuid4())
+
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        with self.assertRaises(Exception) as context:
+            test_config["vim_conn"].get_vminstance(unknown_instance_id)
+
+        self.assertEqual((context.exception).http_code, 404)
+
+    def test_090_refresh_vms_status(self):
+        """Check refresh_vms_status() reports ACTIVE status and an interface list.
+
+        vmware: polls the class-level VM instance.
+        openstack: deploys a dedicated VM first, polls it, then deletes it.
+        """
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        if test_config['vimtype'] == 'vmware':
+            vm_list = []
+            vm_list.append(self.__class__.instance_id)
+
+            # refresh vm status
+            vm_info = test_config["vim_conn"].refresh_vms_status(vm_list)
+            for attr in vm_info[self.__class__.instance_id]:
+                if attr == 'status':
+                    self.assertEqual(vm_info[self.__class__.instance_id][attr], 'ACTIVE')
+                if attr == 'interfaces':
+                    self.assertEqual(type(vm_info[self.__class__.instance_id][attr]), list)
+
+        if test_config['vimtype'] == 'openstack':
+            vpci = "0000:00:11.0"
+            name = "eth0"
+
+            flavor_data = {'name': _get_random_string(20), 'ram': 1024, 'vcpus': 1, 'disk': 10}
+
+            # create new flavor
+            flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+            # create new vm instance
+            net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'vpci': vpci, 'port_security': True, 'type': 'virtual', 'net_id': self.__class__.network_id}]
+
+            instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False, image_id=self.__class__.image_id, flavor_id=flavor_id, net_list=net_list)
+
+            # give the VIM time to bring the instance up before polling its status
+            time.sleep(30)
+            vm_list = []
+            vm_list.append(instance_id)
+
+            # refresh vm status
+            vm_info = test_config["vim_conn"].refresh_vms_status(vm_list)
+            for attr in vm_info[instance_id]:
+                if attr == 'status':
+                    self.assertEqual(vm_info[instance_id][attr], 'ACTIVE')
+                if attr == 'interfaces':
+                    self.assertEqual(type(vm_info[instance_id][attr]), list)
+
+            # Deleting created vm instance
+            logger.info("Deleting created vm intance")
+            test_config["vim_conn"].delete_vminstance(instance_id)
+            time.sleep(10)
+
+
+    def test_100_refresh_vms_status_negative(self):
+        unknown_id = str(uuid.uuid4())
+
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        vm_dict = test_config["vim_conn"].refresh_vms_status([unknown_id])
+
+        if test_config['vimtype'] == 'vmware':
+            self.assertEqual(vm_dict,{})
+
+        if test_config['vimtype'] == 'openstack':
+            self.assertEqual(vm_dict[unknown_id]['status'], 'DELETED')
+
+    def test_110_action_vminstance(self):
+        """Exercise action_vminstance() with a sequence of lifecycle actions.
+
+        vmware: actions run against the class-level VM and echo its id back.
+        openstack: a dedicated VM is deployed, acted upon (the final call is
+        expected to return None) and deleted afterwards.
+        """
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        if test_config['vimtype'] == 'vmware':
+            action_list = ['shutdown', 'start', 'shutoff', 'rebuild', 'pause', 'resume']
+            # various action on vminstace
+            for action in action_list:
+                instance_id = test_config["vim_conn"].action_vminstance(self.__class__.instance_id,
+                                                                        {action: None})
+                self.assertEqual(instance_id, self.__class__.instance_id)
+
+        if test_config['vimtype'] == 'openstack':
+            # create new vm instance
+            vpci = "0000:00:11.0"
+            name = "eth0"
+
+            flavor_data = {'name': _get_random_string(20), 'ram': 1024, 'vcpus': 1, 'disk': 10}
+
+            # create new flavor
+            flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+
+            net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'vpci': vpci, 'port_security': True, 'type': 'virtual', 'net_id': self.__class__.network_id}]
+
+            new_instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False, image_id=self.__class__.image_id, flavor_id=flavor_id, net_list=net_list)
+
+            action_list =  ['shutdown','start','shutoff','rebuild','start','pause','start']
+
+            # various action on vminstace
+            for action in action_list:
+                # sleep for sometime till status is changed
+                time.sleep(25)
+                instance_id = test_config["vim_conn"].action_vminstance(new_instance_id,
+                                                                                   { action: None})
+
+            self.assertTrue(instance_id is None)
+
+            # Deleting created vm instance
+            logger.info("Deleting created vm intance")
+            test_config["vim_conn"].delete_vminstance(new_instance_id)
+            time.sleep(10)
+
+    def test_120_action_vminstance_negative(self):
+        non_exist_id = str(uuid.uuid4())
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        action = 'start'
+        with self.assertRaises(Exception) as context:
+            test_config["vim_conn"].action_vminstance(non_exist_id, { action: None})
+
+        self.assertEqual((context.exception).http_code, 404)
+
+
+    def test_130_delete_vminstance(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        # Deleting created vm instance
+        logger.info("Deleting created vm instance")
+        test_config["vim_conn"].delete_vminstance(self.__class__.instance_id)
+        time.sleep(10)
+
+    def test_140_new_vminstance_sriov(self):
+        logger.info("Testing creation of sriov vm instance using {}".format(test_config['sriov_net_name']))
+        flavor_data = {'name': _get_random_string(20),'ram': 1024, 'vcpus': 2, 'disk': 10}
+        name = 'eth0'
+
+        # create new flavor
+        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        sriov_net_name = test_config['sriov_net_name']
+        new_network_list = test_config["vim_conn"].get_network_list({'name': sriov_net_name})
+        for list_item in new_network_list:
+            self.assertEqual(sriov_net_name, list_item.get('name'))
+            self.__class__.sriov_network_id = list_item.get('id')
+
+        net_list = [{'use': 'data', 'name': name, 'floating_ip': False, 'port_security': True, 'type': 'VF', 'net_id': self.__class__.sriov_network_id}]
+
+        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_sriov_vm', description='', start=False, image_id=self.__class__.image_id, flavor_id=flavor_id, net_list=net_list)
+
+        self.assertIsInstance(instance_id, (str, unicode))
+
+        logger.info("Waiting for created sriov-vm intance")
+        time.sleep(10)
+        # Deleting created vm instance
+        logger.info("Deleting created sriov-vm intance")
+        test_config["vim_conn"].delete_vminstance(instance_id)
+        time.sleep(10)
+
+class test_vimconn_get_tenant_list(test_base):
+    tenant_id = None
+
+    def test_000_get_tenant_list(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        # Getting tenant list
+        tenant_list = test_config["vim_conn"].get_tenant_list()
+
+        for item in tenant_list:
+            if test_config['tenant'] == item['name']:
+                self.__class__.tenant_id = item['id']
+                self.assertIsInstance(item['name'], (str, unicode))
+                self.assertIsInstance(item['id'], (str, unicode))
+
+    def test_010_get_tenant_list_by_id(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        # Getting filter tenant list by its id
+        filter_tenant_list = test_config["vim_conn"].get_tenant_list({'id': self.__class__.tenant_id})
+
+        for item in filter_tenant_list:
+            self.assertIsInstance(item['id'], (str, unicode))
+            self.assertEqual(item['id'], self.__class__.tenant_id)
+
+    def test_020_get_tenant_list_by_name(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        # Getting filter tenant list by its name
+        filter_tenant_list = test_config["vim_conn"].get_tenant_list({'name': test_config['tenant']})
+
+        for item in filter_tenant_list:
+            self.assertIsInstance(item['name'], (str, unicode))
+            self.assertEqual(item['name'], test_config['tenant'])
+
+    def test_030_get_tenant_list_by_name_and_id(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        # Getting filter tenant list by its name and id
+        filter_tenant_list = test_config["vim_conn"].get_tenant_list({'name': test_config['tenant'],
+                                                                    'id': self.__class__.tenant_id})
+
+        for item in filter_tenant_list:
+            self.assertIsInstance(item['name'], (str, unicode))
+            self.assertIsInstance(item['id'], (str, unicode))
+            self.assertEqual(item['name'], test_config['tenant'])
+            self.assertEqual(item['id'], self.__class__.tenant_id)
+
+    def test_040_get_tenant_list_negative(self):
+        non_exist_tenant_name = "Tenant_123"
+        non_exist_tenant_id = "kjhgrt456-45345kjhdfgnbdk-34dsfjdfg"
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        filter_tenant_list = test_config["vim_conn"].get_tenant_list({'name': non_exist_tenant_name,
+                                                                         'id': non_exist_tenant_id})
+
+        self.assertEqual(filter_tenant_list, [])
+
+
+class test_vimconn_new_tenant(test_base):
+    tenant_id = None
+
+    def test_000_new_tenant(self):
+        tenant_name = _get_random_string(20)
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        self.__class__.tenant_id = test_config["vim_conn"].new_tenant(tenant_name, "")
+        time.sleep(15)
+
+        self.assertIsInstance(self.__class__.tenant_id, (str, unicode))
+
+
+    def test_010_new_tenant_negative(self):
+        Invalid_tenant_name = 10121
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        with self.assertRaises(Exception) as context:
+            test_config["vim_conn"].new_tenant(Invalid_tenant_name, "")
+
+        self.assertEqual((context.exception).http_code, 400)
+
+
+    def test_020_delete_tenant(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        tenant_id = test_config["vim_conn"].delete_tenant(self.__class__.tenant_id)
+
+        self.assertIsInstance(tenant_id, (str, unicode))
+
+    def test_030_delete_tenant_negative(self):
+        Non_exist_tenant_name = 'Test_30_tenant'
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        with self.assertRaises(Exception) as context:
+            test_config["vim_conn"].delete_tenant(Non_exist_tenant_name)
+
+        self.assertEqual((context.exception).http_code, 404)
+
+
+def get_image_id():
+    if test_config['image_name']:
+        image_list = test_config['vim_conn'].get_image_list({'name': test_config['image_name']})
+        if len(image_list) == 0:
+            raise Exception("Image {} is not found at VIM".format(test_config['image_name']))
+        else:
+            image_id = image_list[0]['id']
+    else:
+        image_list = test_config['vim_conn'].get_image_list()
+        if len(image_list) == 0:
+            raise Exception("Not found any image at VIM")
+        else:
+            image_id = image_list[0]['id']
+    return image_id
+
+
+class test_vimconn_vminstance_by_ip_address(test_base):
+    network_name = None
+    network_id = None
+
+    def setUp(self):
+        # create network
+        self.network_name = _get_random_string(20)
+
+        self.network_id, _ = test_config["vim_conn"].new_network(net_name=self.network_name,
+                                                                       net_type='bridge')
+
+    def tearDown(self):
+        test_base.tearDown(self)
+        # Deleting created network
+        result = test_config["vim_conn"].delete_network(self.network_id)
+        if result:
+            logger.info("Network id {} sucessfully deleted".format(self.network_id))
+        else:
+            logger.info("Failed to delete network id {}".format(self.network_id))
+
+
+    def test_000_vminstance_by_ip_address(self):
+        """
+           This test case will deploy VM with provided IP address
+           Pre-requesite: provided IP address should be from IP pool range which has used for network creation
+        """
+        name = "eth0"
+        # provide ip address  
+        ip_address = '' 
+
+        flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10}
+
+        # create new flavor
+        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+
+        # find image id
+        image_id = get_image_id()
+
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        net_list = [{'use': 'bridge', 'name': name, 'floating_ip': False, 'port_security': True, 'type': 'virtual',
+                                                    'net_id': self.network_id, 'ip_address': ip_address}]
+
+        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
+                                                            flavor_id=flavor_id, net_list=net_list)
+
+        self.assertEqual(type(instance_id),str)
+        logger.info("Deleting created vm instance")
+        test_config["vim_conn"].delete_vminstance(instance_id)
+        time.sleep(10)
+
+    def test_010_vminstance_by_ip_address_negative(self):
+        name = "eth1"
+        # IP address not from subnet range
+        invalid_ip_address = '10.10.12.1'
+
+        flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10}
+
+        # create new flavor
+        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+
+        # find image name and image id
+        image_id = get_image_id()
+
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        net_list = [{'use': 'bridge', 'name': name, 'floating_ip': False, 'port_security': True, 'type': 'virtual',
+                                                      'net_id': self.network_id, 'ip_address': invalid_ip_address}]
+
+        with self.assertRaises(Exception) as context:
+            test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
+                                                                  flavor_id=flavor_id,
+                                                                    net_list=net_list)
+        self.assertEqual((context.exception).http_code, 400)
+
+    def test_020_vminstance_by_floating_ip(self):
+        name = "eth1"
+        flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10}
+
+        # create new flavor
+        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+
+        # find image name and image id
+        image_id = get_image_id()
+
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        net_list = [{'use': 'bridge', 'name': name, 'floating_ip': True, 'port_security': True, 'type': 'virtual',
+                                                                                       'net_id': self.network_id}]
+
+        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
+                                                            flavor_id=flavor_id, net_list=net_list)
+
+        self.assertEqual(type(instance_id),str)
+        logger.info("Deleting created vm instance")
+        test_config["vim_conn"].delete_vminstance(instance_id)
+        time.sleep(10)
+
+    def test_030_vminstance_by_mac_address(self):
+        name = "eth1"
+        mac_address = "74:54:2f:21:da:8c" 
+        flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10}
+
+        # create new flavor
+        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+
+        # find image name and image id
+        image_id = get_image_id()
+
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        net_list = [{'use': 'bridge', 'name': name, 'floating_ip': False, 'port_security': True, 'type': 'virtual',
+                                                             'net_id': self.network_id,'mac_address': mac_address}]
+
+        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
+                                                            flavor_id=flavor_id, net_list=net_list)
+
+        self.assertEqual(type(instance_id),str)
+        logger.info("Deleting created vm instance")
+        test_config["vim_conn"].delete_vminstance(instance_id)
+        time.sleep(10)
+
+class test_vimconn_vminstance_by_adding_10_nics(test_base):
+    network_name = None
+    net_ids = [] 
+
+    def setUp(self):
+        # create network
+        i = 0
+        for i in range(10):
+            self.network_name = _get_random_string(20)
+            network_id, _ = test_config["vim_conn"].new_network(net_name=self.network_name,
+                                                                      net_type='bridge')
+            self.net_ids.append(network_id)
+
+    def tearDown(self):
+        test_base.tearDown(self)
+        # Deleting created network
+        for net_id in self.net_ids:
+            result = test_config["vim_conn"].delete_network(net_id)
+            if result:
+                logger.info("Network id {} sucessfully deleted".format(net_id))
+            else:
+                logger.info("Failed to delete network id {}".format(net_id))
+
+    def test_000_vminstance_by_adding_10_nics(self):
+        flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10}
+
+        # create new flavor
+        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+
+        # find image name and image id
+        image_id = get_image_id()
+
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        net_list = []
+        c = 1
+        for net_id in self.net_ids:
+            name = "eth{}".format(c)
+            net_list.append({'use': 'bridge', 'name': name, 'floating_ip': False,
+                                    'port_security': True, 'type': 'virtual', 'net_id': net_id})
+            c = c+1
+
+        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
+                                                            flavor_id=flavor_id, net_list=net_list)
+
+        self.assertEqual(type(instance_id),str)
+        logger.info("Deleting created vm instance")
+        test_config["vim_conn"].delete_vminstance(instance_id)
+        time.sleep(10)
+
+
class test_vimconn_vminstance_by_existing_disk(test_base):
    """VM instantiation tests exercising the disk_list parameter of new_vminstance():
    attaching an existing image as a disk, a new empty disk, and an ISO image as a
    CDROM. A throw-away bridge network is created for each test and removed afterwards.
    """

    network_name = None  # random name of the network created in setUp
    network_id = None    # VIM id of the network created in setUp

    def setUp(self):
        # create a dedicated bridge network for the test
        self.network_name = _get_random_string(20)
        self.network_id, _ = test_config["vim_conn"].new_network(net_name=self.network_name,
                                                                 net_type='bridge')

    def tearDown(self):
        test_base.tearDown(self)
        # Deleting created network
        result = test_config["vim_conn"].delete_network(self.network_id)
        if result:
            # fix: log message typo ("sucessfully")
            logger.info("Network id {} successfully deleted".format(self.network_id))
        else:
            logger.info("Failed to delete network id {}".format(self.network_id))

    def test_000_vminstance_by_existing_disk(self):
        """ This testcase will add existing disk only if given catalog/image is free
            means not used by any other VM
        """

        flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10}
        name = "eth10"

        # create new flavor
        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)

        # find image name and image id
        image_id = get_image_id()
        # use the first image named 'cirros' in the catalog as the existing disk
        cirros_image = test_config["vim_conn"].get_image_list({'name': 'cirros'})
        disk_list = [{'image_id': cirros_image[0]['id'], 'size': 5}]

        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        net_list = [{'use': 'bridge', 'name': name, 'floating_ip': False, 'port_security': True,
                     'type': 'virtual', 'net_id': self.network_id}]

        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
                                                                flavor_id=flavor_id, net_list=net_list,
                                                                disk_list=disk_list)

        self.assertEqual(type(instance_id), str)
        logger.info("Deleting created vm instance")
        test_config["vim_conn"].delete_vminstance(instance_id)
        time.sleep(10)

    def test_010_vminstance_by_new_disk(self):
        """Deploy a VM with a newly created (empty) 5GB disk, then delete it."""
        flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10}
        name = "eth10"

        # create new flavor
        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)

        # find image name and image id
        image_id = get_image_id()
        # no image_id: requests a brand-new empty disk of the given size
        disk_list = [{'size': '5'}]

        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        net_list = [{'use': 'bridge', 'name': name, 'floating_ip': False, 'port_security': True,
                     'type': 'virtual', 'net_id': self.network_id}]

        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
                                                                flavor_id=flavor_id, net_list=net_list,
                                                                disk_list=disk_list)

        self.assertEqual(type(instance_id), str)
        logger.info("Deleting created vm instance")
        test_config["vim_conn"].delete_vminstance(instance_id)
        time.sleep(10)

    def test_020_vminstance_by_CDROM(self):
        """ This testcase will insert media file only if provided catalog
            has pre-created ISO media file into vCD
        """
        flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10}
        name = "eth10"
        # attach the first 'Ubuntu' catalog entry as a CDROM device
        image_list = test_config["vim_conn"].get_image_list({'name': 'Ubuntu'})
        disk_list = [{'image_id': image_list[0]['id'], 'device_type': 'cdrom'}]

        # create new flavor
        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)

        # find image name and image id
        image_id = get_image_id()

        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        net_list = [{'use': 'bridge', 'name': name, 'floating_ip': False, 'port_security': True,
                     'type': 'virtual', 'net_id': self.network_id}]

        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
                                                                flavor_id=flavor_id, net_list=net_list,
                                                                disk_list=disk_list)

        self.assertEqual(type(instance_id), str)
        logger.info("Deleting created vm instance")
        test_config["vim_conn"].delete_vminstance(instance_id)
        time.sleep(10)
+
+
class test_vimconn_vminstance_by_affinity_anti_affinity(test_base):
    """VM instantiation test using availability zones (vCenter host groups) to
    exercise affinity/anti-affinity placement. A throw-away bridge network is
    created per test and deleted afterwards.
    """

    network_name = None  # random name of the network created in setUp
    network_id = None    # VIM id of the network created in setUp

    def setUp(self):
        # create a dedicated bridge network for the test
        self.network_name = _get_random_string(20)
        self.network_id, _ = test_config["vim_conn"].new_network(net_name=self.network_name,
                                                                 net_type='bridge')

    def tearDown(self):
        test_base.tearDown(self)
        # Deleting created network
        result = test_config["vim_conn"].delete_network(self.network_id)
        if result:
            # fix: log message typo ("sucessfully")
            logger.info("Network id {} successfully deleted".format(self.network_id))
        else:
            logger.info("Failed to delete network id {}".format(self.network_id))

    def test_000_vminstance_by_affinity_anti_affinity(self):
        """ This testcase will deploy VM into provided HOSTGROUP in VIM config
            Pre-requisites: User has created Host Groups in vCenter with respective Hosts to be used
            While creating VIM account user has to pass the Host Group names in availability_zone list
        """
        flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10}
        name = "eth10"

        # create new flavor
        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)

        # find image name and image id
        image_id = get_image_id()

        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        net_list = [{'use': 'bridge', 'name': name, 'floating_ip': False, 'port_security': True,
                     'type': 'virtual', 'net_id': self.network_id}]

        # request placement in the second host group of the configured availability zones
        # (the host group names must match the ones configured in the VIM account)
        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
                                                                flavor_id=flavor_id, net_list=net_list,
                                                                availability_zone_index=1,
                                                                availability_zone_list=['HG_174', 'HG_175'])

        self.assertEqual(type(instance_id), str)
        time.sleep(10)
        logger.info("Deleting created vm instance")
        test_config["vim_conn"].delete_vminstance(instance_id)
+
class test_vimconn_vminstance_by_numa_affinity(test_base):
    """VM instantiation test using an extended flavor with NUMA paired-thread
    placement. A throw-away bridge network is created per test and deleted
    afterwards.
    """

    network_name = None  # random name of the network created in setUp
    network_id = None    # VIM id of the network created in setUp

    def setUp(self):
        # create a dedicated bridge network for the test
        self.network_name = _get_random_string(20)
        self.network_id, _ = test_config["vim_conn"].new_network(net_name=self.network_name,
                                                                 net_type='bridge')

    def tearDown(self):
        test_base.tearDown(self)
        # Deleting created network
        result = test_config["vim_conn"].delete_network(self.network_id)
        if result:
            # fix: log message typo ("sucessfully")
            logger.info("Network id {} successfully deleted".format(self.network_id))
        else:
            logger.info("Failed to delete network id {}".format(self.network_id))

    def test_000_vminstance_by_numa_affinity(self):
        """Deploy a VM whose flavor requests NUMA paired-thread placement, then delete it."""
        # fix: the 'paired-threads' key had a stray leading space (' paired-threads')
        # and a long run of garbage whitespace inside the literal — presumably
        # unintended, as the sibling key is 'paired-threads-id' without a space
        flavor_data = {'extended': {'numas': [{'paired-threads-id': [['1', '3'], ['2', '4']],
                                               'paired-threads': 2, 'memory': 1}]},
                       'ram': 1024, 'vcpus': 1, 'disk': 10}
        name = "eth10"

        # create new flavor
        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)

        # find image name and image id
        image_id = get_image_id()

        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        net_list = [{'use': 'bridge', 'name': name, 'floating_ip': False, 'port_security': True,
                     'type': 'virtual', 'net_id': self.network_id}]

        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
                                                                flavor_id=flavor_id, net_list=net_list)

        self.assertEqual(type(instance_id), str)
        logger.info("Deleting created vm instance")
        test_config["vim_conn"].delete_vminstance(instance_id)
        time.sleep(10)
+
+
+'''
+IMPORTANT NOTE
+The following unittest class does not have the 'test_' on purpose. This test is the one used for the
+scenario based tests.
+'''
class descriptor_based_scenario_test(test_base):
    """Scenario test driven by the YAML descriptors of one folder under the test directory.

    Deliberately not named 'test_...' so unittest discovery skips it; test_deploy()
    schedules it explicitly once per scenario folder (via test_config["test_folder"]).
    Test methods are numbered so they execute in order: load descriptors, instantiate,
    check deployment, clean up.
    """
    test_index = 0
    scenario_test_path = None

    @classmethod
    def setUpClass(cls):
        """Reset per-scenario state and compute the scenario folder path."""
        cls.test_index = 1
        # items created during the run; deleted in reverse creation order by test_030
        cls.to_delete_list = []
        cls.scenario_uuids = []
        cls.instance_scenario_uuids = []
        cls.scenario_test_path = test_config["test_directory"] + '/' + test_config["test_folder"]
        logger.info("{}. {} {}".format(test_config["test_number"], cls.__name__, test_config["test_folder"]))

    @classmethod
    def tearDownClass(cls):
         test_config["test_number"] += 1

    def test_000_load_scenario(self):
        """Load every *.yaml in the scenario folder and create the VNFs and scenario at RO."""
        self.__class__.test_text = "{}.{}. TEST {} {}".format(test_config["test_number"], self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name,
                                                           test_config["test_folder"])
        self.__class__.test_index += 1
        # load VNFD and NSD
        descriptor_files = glob.glob(self.__class__.scenario_test_path+'/*.yaml')
        vnf_descriptors = []
        scenario_descriptors = []
        for descriptor_file in descriptor_files:
            with open(descriptor_file, 'r') as stream:
                descriptor = yaml.load(stream, Loader=yaml.Loader)
                # classify by top-level key: classic ("vnf") or IM-style VNFD keys
                if "vnf" in descriptor or "vnfd:vnfd-catalog" in descriptor or "vnfd-catalog" in descriptor:
                    vnf_descriptors.append(descriptor)
                else:
                    scenario_descriptors.append(descriptor)

        # NOTE(review): 'scenario_file' is computed but never used
        scenario_file = glob.glob(self.__class__.scenario_test_path + '/scenario_*.yaml')
        if not vnf_descriptors or not scenario_descriptors or len(scenario_descriptors) > 1:
            raise Exception("Test '{}' not valid. It must contain an scenario file and at least one vnfd file'".format(
                test_config["test_folder"]))

        # load all vnfd
        for vnf_descriptor in vnf_descriptors:
            logger.debug("VNF descriptor: {}".format(vnf_descriptor))
            vnf = test_config["client"].create_vnf(descriptor=vnf_descriptor, image_name=test_config["image_name"])
            logger.debug(vnf)
            # response shape depends on descriptor flavour: classic 'vnf' vs IM 'vnfd' list
            if 'vnf' in vnf:
                vnf_uuid = vnf['vnf']['uuid']
            else:
                vnf_uuid = vnf['vnfd'][0]['uuid']
            # insert at the front so deletion happens in reverse creation order
            self.__class__.to_delete_list.insert(0, {"item": "vnf", "function": test_config["client"].delete_vnf,
                                                     "params": {"uuid": vnf_uuid}})

        # load the scenario definition
        for scenario_descriptor in scenario_descriptors:
            # networks = scenario_descriptor['scenario']['networks']
            # networks[test_config["mgmt_net"]] = networks.pop('mgmt')
            logger.debug("Scenario descriptor: {}".format(scenario_descriptor))
            scenario = test_config["client"].create_scenario(descriptor=scenario_descriptor)
            logger.debug(scenario)
            # same dual-format handling as for VNFs: 'scenario' vs 'nsd' list
            if 'scenario' in scenario:
                scenario_uuid = scenario['scenario']['uuid']
            else:
                scenario_uuid = scenario['nsd'][0]['uuid']
            self.__class__.to_delete_list.insert(0, {"item": "scenario",
                                                     "function": test_config["client"].delete_scenario,
                                                     "params": {"uuid": scenario_uuid}})
            self.__class__.scenario_uuids.append(scenario_uuid)

    def test_010_instantiate_scenario(self):
        """Instantiate every loaded scenario, mapping its 'mgmt' network to the configured one."""
        self.__class__.test_text = "{}.{}. TEST {} {}".format(test_config["test_number"], self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name,
                                                           test_config["test_folder"])
        self.__class__.test_index += 1
        for scenario_uuid in self.__class__.scenario_uuids:
            instance_descriptor = {
                "instance":{
                    "name": self.__class__.test_text,
                    "scenario": scenario_uuid,
                    "networks":{
                        "mgmt": {"sites": [ { "netmap-use": test_config["mgmt_net"]} ]}
                    }
                }
            }
            instance = test_config["client"].create_instance(instance_descriptor)
            self.__class__.instance_scenario_uuids.append(instance['uuid'])
            logger.debug(instance)
            self.__class__.to_delete_list.insert(0, {"item": "instance",
                                                     "function": test_config["client"].delete_instance,
                                                     "params": {"uuid": instance['uuid']}})

    def test_020_check_deployent(self):
        """Poll the instances until all are active, one reports ERROR, or the timeout expires.

        (sic: 'deployent' name kept — renaming would change the alphabetical execution order key)
        """
        self.__class__.test_text = "{}.{}. TEST {} {}".format(test_config["test_number"], self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name,
                                                           test_config["test_folder"])
        self.__class__.test_index += 1

        if test_config["manual"]:
            input('Scenario has been deployed. Perform manual check and press any key to resume')
            return

        keep_waiting = test_config["timeout"]
        pending_instance_scenario_uuids = list(self.__class__.instance_scenario_uuids)   # make a copy
        while pending_instance_scenario_uuids:
            index = 0
            while index < len(pending_instance_scenario_uuids):
                result = check_instance_scenario_active(pending_instance_scenario_uuids[index])
                if result[0]:
                    # instance is active: drop it and leave the inner loop; the
                    # remaining ones are re-checked after the wait below
                    del pending_instance_scenario_uuids[index]
                    break
                elif 'ERROR' in result[1]:
                    msg = 'Got error while waiting for the instance to get active: '+result[1]
                    logging.error(msg)
                    raise Exception(msg)
                index += 1

            # wait in 5s steps, consuming the remaining budget before declaring timeout
            if keep_waiting >= 5:
                time.sleep(5)
                keep_waiting -= 5
            elif keep_waiting > 0:
                time.sleep(keep_waiting)
                keep_waiting = 0
            else:
                msg = 'Timeout reached while waiting instance scenario to get active'
                logging.error(msg)
                raise Exception(msg)

    def test_030_clean_deployment(self):
        """Delete everything recorded in to_delete_list (instances, scenarios, vnfs)."""
        self.__class__.test_text = "{}.{}. TEST {} {}".format(test_config["test_number"], self.__class__.test_index,
                                                              inspect.currentframe().f_code.co_name,
                                                              test_config["test_folder"])
        self.__class__.test_index += 1
        #At the moment if you delete an scenario right after creating it, in openstack datacenters
        #sometimes scenario ports get orphaned. This sleep is just a dirty workaround
        time.sleep(5)
        for item in self.__class__.to_delete_list:
            response = item["function"](**item["params"])
            logger.debug(response)
+
+
+def _get_random_string(maxLength):
+    '''generates a string with random characters string.letters and string.digits
+    with a random length up to maxLength characters. If maxLength is <15 it will be changed automatically to 15
+    '''
+    prefix = 'testing_'
+    min_string = 15
+    minLength = min_string - len(prefix)
+    if maxLength < min_string: maxLength = min_string
+    maxLength -= len(prefix)
+    length = random.randint(minLength,maxLength)
+    return 'testing_'+"".join([random.choice(string.letters+string.digits) for i in xrange(length)])
+
+
def test_vimconnector(args):
    """Run the vimconnector test set ('test_vimconn*' classes) directly against a VIM.

    Builds the VIM connector selected by args.vimtype (vmware and openstack are fully
    configured; aws and openvim only import their module), stores it in the global
    test_config, then runs either the tests given with --test or every 'test_vimconn*'
    class. Returns a tuple (executed, failed); exits the process on unknown test
    names, unsupported vimtype, --list-tests, or (with --failfast) the first failure.
    """
    global test_config
    # make the osm_ro package (sibling of this test directory) importable
    sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + "/osm_ro")
    test_config['vimtype'] = args.vimtype
    if args.vimtype == "vmware":
        import vimconn_vmware as vim

        test_config["test_directory"] = os.path.dirname(__file__) + "/RO_tests"

        tenant_name = args.tenant_name
        test_config['tenant'] = tenant_name
        # args.config_param is a YAML string carrying VIM credentials and options
        config_params = yaml.load(args.config_param, Loader=yaml.Loader)
        org_name = config_params.get('orgname')
        org_user = config_params.get('user')
        org_passwd = config_params.get('passwd')
        vim_url = args.endpoint_url
        test_config['image_path'] = args.image_path
        test_config['image_name'] = args.image_name
        test_config['sriov_net_name'] = args.sriov_net_name

        # vmware connector obj
        test_config['vim_conn'] = vim.vimconnector(name=org_name, tenant_name=tenant_name, user=org_user,passwd=org_passwd, url=vim_url, config=config_params)

    elif args.vimtype == "aws":
        # NOTE(review): only the module is imported; test_config['vim_conn'] is not
        # created for aws, so the vimconn tests cannot run against it as-is
        import vimconn_aws as vim
    elif args.vimtype == "openstack":
        import vimconn_openstack as vim

        test_config["test_directory"] = os.path.dirname(__file__) + "/RO_tests"

        tenant_name = args.tenant_name
        test_config['tenant'] = tenant_name
        config_params = yaml.load(args.config_param, Loader=yaml.Loader)
        os_user = config_params.get('user')
        os_passwd = config_params.get('passwd')
        vim_url = args.endpoint_url
        test_config['image_path'] = args.image_path
        test_config['image_name'] = args.image_name
        test_config['sriov_net_name'] = args.sriov_net_name

        # openstack connector obj
        vim_persistent_info = {}
        test_config['vim_conn'] = vim.vimconnector(
            uuid="test-uuid-1", name="VIO-openstack",
            tenant_id=None, tenant_name=tenant_name,
            url=vim_url, url_admin=None,
            user=os_user, passwd=os_passwd,
            config=config_params, persistent_info=vim_persistent_info
        )
        test_config['vim_conn'].debug = "true"

    elif args.vimtype == "openvim":
        # NOTE(review): same as aws — the connector object is not instantiated here
        import vimconn_openvim as vim
    else:
        logger.critical("vimtype '{}' not supported".format(args.vimtype))
        sys.exit(1)
    executed = 0
    failed = 0
    clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)
    # If only want to obtain a tests list print it and exit
    if args.list_tests:
        tests_names = []
        for cls in clsmembers:
            if cls[0].startswith('test_vimconn'):
                tests_names.append(cls[0])

        msg = "The 'vim' set tests are:\n\t" + ', '.join(sorted(tests_names))
        print(msg)
        logger.info(msg)
        sys.exit(0)

    # Create the list of tests to be run
    code_based_tests = []
    if args.tests:
        for test in args.tests:
            for t in test.split(','):
                matches_code_based_tests = [item for item in clsmembers if item[0] == t]
                if len(matches_code_based_tests) > 0:
                    code_based_tests.append(matches_code_based_tests[0][1])
                else:
                    logger.critical("Test '{}' is not among the possible ones".format(t))
                    sys.exit(1)
    if not code_based_tests:
        # include every 'test_vimconn*' class found in this module
        for cls in clsmembers:
            if cls[0].startswith('test_vimconn'):
                code_based_tests.append(cls[1])

    logger.debug("tests to be executed: {}".format(code_based_tests))

    # TextTestRunner stream is set to /dev/null in order to avoid the method to directly print the result of tests.
    # This is handled in the tests using logging.
    stream = open('/dev/null', 'w')

    # Run code based tests
    basic_tests_suite = unittest.TestSuite()
    for test in code_based_tests:
        basic_tests_suite.addTest(unittest.makeSuite(test))
    result = unittest.TextTestRunner(stream=stream, failfast=failfast).run(basic_tests_suite)
    executed += result.testsRun
    failed += len(result.failures) + len(result.errors)
    if failfast and failed:
        sys.exit(1)
    if len(result.failures) > 0:
        logger.debug("failures : {}".format(result.failures))
    if len(result.errors) > 0:
        logger.debug("errors : {}".format(result.errors))
    return executed, failed
+
+
def test_vim(args):
    """Run the VIM test set ('test_VIM*' classes) against an openmano server.

    Creates the openmano client from the CLI arguments, optionally just lists the
    available tests (--list-tests), then runs either the tests given with --test or
    every 'test_VIM*' class except 'test_VIM_tenant_operations'. Returns a tuple
    (executed, failed); exits on unknown test names or (with --failfast) the first
    failure.
    """
    global test_config
    # make the osm_ro package (sibling of this test directory) importable
    sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + "/osm_ro")
    import openmanoclient
    executed = 0
    failed = 0
    test_config["client"] = openmanoclient.openmanoclient(
        endpoint_url=args.endpoint_url,
        tenant_name=args.tenant_name,
        datacenter_name=args.datacenter,
        debug=args.debug, logger=test_config["logger_name"])
    clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)
    # If only want to obtain a tests list print it and exit
    if args.list_tests:
        tests_names = []
        for cls in clsmembers:
            if cls[0].startswith('test_VIM'):
                tests_names.append(cls[0])

        msg = "The 'vim' set tests are:\n\t" + ', '.join(sorted(tests_names)) +\
              "\nNOTE: The test test_VIM_tenant_operations will fail in case the used datacenter is type OpenStack " \
              "unless RO has access to the admin endpoint. Therefore this test is excluded by default"
        print(msg)
        logger.info(msg)
        sys.exit(0)

    # Create the list of tests to be run
    code_based_tests = []
    if args.tests:
        for test in args.tests:
            for t in test.split(','):
                matches_code_based_tests = [item for item in clsmembers if item[0] == t]
                if len(matches_code_based_tests) > 0:
                    code_based_tests.append(matches_code_based_tests[0][1])
                else:
                    logger.critical("Test '{}' is not among the possible ones".format(t))
                    sys.exit(1)
    if not code_based_tests:
        # include all tests
        for cls in clsmembers:
            # We exclude 'test_VIM_tenant_operations' unless it is specifically requested by the user
            if cls[0].startswith('test_VIM') and cls[0] != 'test_VIM_tenant_operations':
                code_based_tests.append(cls[1])

    logger.debug("tests to be executed: {}".format(code_based_tests))

    # TextTestRunner stream is set to /dev/null in order to avoid the method to directly print the result of tests.
    # This is handled in the tests using logging.
    stream = open('/dev/null', 'w')

    # Run code based tests
    basic_tests_suite = unittest.TestSuite()
    for test in code_based_tests:
        basic_tests_suite.addTest(unittest.makeSuite(test))
    result = unittest.TextTestRunner(stream=stream, failfast=failfast).run(basic_tests_suite)
    executed += result.testsRun
    failed += len(result.failures) + len(result.errors)
    if failfast and failed:
        sys.exit(1)
    if len(result.failures) > 0:
        logger.debug("failures : {}".format(result.failures))
    if len(result.errors) > 0:
        logger.debug("errors : {}".format(result.errors))
    return executed, failed
+
+
def test_wim(args):
    """Run the WIM test set ('test_WIM*' classes) against an openmano server.

    Creates the openmano client from the CLI arguments, optionally just lists the
    available tests (--list-tests), then runs either the tests given with --test or
    every 'test_WIM*' class. Returns a tuple (executed, failed); exits on unknown
    test names or (with --failfast) the first failure.
    """
    global test_config
    # make the osm_ro package (sibling of this test directory) importable
    sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + "/osm_ro")
    import openmanoclient
    executed = 0
    failed = 0
    test_config["client"] = openmanoclient.openmanoclient(
        endpoint_url=args.endpoint_url,
        tenant_name=args.tenant_name,
        datacenter_name=args.datacenter,
        debug=args.debug, logger=test_config["logger_name"])
    clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)
    # If only want to obtain a tests list print it and exit
    if args.list_tests:
        tests_names = []
        for cls in clsmembers:
            if cls[0].startswith('test_WIM'):
                tests_names.append(cls[0])

        # fix: the message carried a copy-pasted NOTE about test_VIM_tenant_operations
        # from test_vim() that does not apply to the 'wim' set
        msg = "The 'wim' set tests are:\n\t" + ', '.join(sorted(tests_names))
        print(msg)
        logger.info(msg)
        sys.exit(0)

    # Create the list of tests to be run
    code_based_tests = []
    if args.tests:
        for test in args.tests:
            for t in test.split(','):
                matches_code_based_tests = [item for item in clsmembers if item[0] == t]
                if len(matches_code_based_tests) > 0:
                    code_based_tests.append(matches_code_based_tests[0][1])
                else:
                    logger.critical("Test '{}' is not among the possible ones".format(t))
                    sys.exit(1)
    if not code_based_tests:
        # include all WIM tests. fix: this previously filtered on the 'test_VIM'
        # prefix (copy-paste from test_vim), which made the 'wim' set run the VIM
        # tests instead of the WIM ones
        for cls in clsmembers:
            if cls[0].startswith('test_WIM'):
                code_based_tests.append(cls[1])

    logger.debug("tests to be executed: {}".format(code_based_tests))

    # TextTestRunner stream is set to /dev/null in order to avoid the method to directly print the result of tests.
    # This is handled in the tests using logging.
    stream = open('/dev/null', 'w')

    # Run code based tests
    basic_tests_suite = unittest.TestSuite()
    for test in code_based_tests:
        basic_tests_suite.addTest(unittest.makeSuite(test))
    result = unittest.TextTestRunner(stream=stream, failfast=failfast).run(basic_tests_suite)
    executed += result.testsRun
    failed += len(result.failures) + len(result.errors)
    if failfast and failed:
        sys.exit(1)
    if len(result.failures) > 0:
        logger.debug("failures : {}".format(result.failures))
    if len(result.errors) > 0:
        logger.debug("errors : {}".format(result.errors))
    return executed, failed
+
+
def test_deploy(args):
    """Run the descriptor-based ('deploy') test set.

    Each sub-directory of the RO_tests directory is one scenario test: its YAML
    descriptors are loaded and instantiated by descriptor_based_scenario_test.
    Runs either the folders given with --test or all of them, and returns a tuple
    (executed, failed); exits on unknown test names, --list-tests, or (with
    --failfast) the first failure.
    """
    global test_config
    # make the osm_ro package (sibling of this test directory) importable
    sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + "/osm_ro")
    import openmanoclient
    executed = 0
    failed = 0
    test_config["test_directory"] = os.path.dirname(__file__) + "/RO_tests"
    test_config["image_name"] = args.image_name
    test_config["mgmt_net"] = args.mgmt_net
    test_config["manual"] = args.manual
    test_directory_content = os.listdir(test_config["test_directory"])
    # If only want to obtain a tests list print it and exit
    if args.list_tests:
        msg = "the 'deploy' set tests are:\n\t" + ', '.join(sorted(test_directory_content))
        print(msg)
        # logger.info(msg)
        sys.exit(0)

    # Create the list of tests to be run: the requested folders, or all of them
    descriptor_based_tests = []
    if args.tests:
        for test in args.tests:
            for t in test.split(','):
                if t in test_directory_content:
                    descriptor_based_tests.append(t)
                else:
                    logger.critical("Test '{}' is not among the possible ones".format(t))
                    sys.exit(1)
    if not descriptor_based_tests:
        # include all tests
        descriptor_based_tests = test_directory_content

    # fix: log the descriptor-based list; an always-empty unused 'code_based_tests'
    # variable was logged before
    logger.debug("tests to be executed: {}".format(descriptor_based_tests))

    # import openmanoclient from relative path
    test_config["client"] = openmanoclient.openmanoclient(
        endpoint_url=args.endpoint_url,
        tenant_name=args.tenant_name,
        datacenter_name=args.datacenter,
        debug=args.debug, logger=test_config["logger_name"])

    # TextTestRunner stream is set to /dev/null in order to avoid the method to directly print the result of tests.
    # This is handled in the tests using logging.
    stream = open('/dev/null', 'w')
    # This scenario based tests are defined as directories inside the directory defined in 'test_directory'
    for test in descriptor_based_tests:
        test_config["test_folder"] = test
        test_suite = unittest.TestSuite()
        test_suite.addTest(unittest.makeSuite(descriptor_based_scenario_test))
        result = unittest.TextTestRunner(stream=stream, failfast=False).run(test_suite)
        executed += result.testsRun
        failed += len(result.failures) + len(result.errors)
        if failfast and failed:
            sys.exit(1)
        if len(result.failures) > 0:
            logger.debug("failures : {}".format(result.failures))
        if len(result.errors) > 0:
            logger.debug("errors : {}".format(result.errors))

    return executed, failed
+
if __name__=="__main__":

    # Top-level parser only handles -v/--version; all real work is done by the
    # subcommands registered below
    parser = ArgumentParser(description='Test RO module')
    parser.add_argument('-v','--version', action='version', help="Show current version",
                             version='%(prog)s version ' + __version__  + ' ' + version_date)

    # Common parameters
    # parent_parser is shared (via parents=[...]) by every subcommand, so these
    # options are accepted by all of them
    parent_parser = ArgumentParser(add_help=False)
    parent_parser.add_argument('--failfast', help='Stop when a test fails rather than execute all tests',
                      dest='failfast', action="store_true", default=False)
    parent_parser.add_argument('--failed', help='Set logs to show only failed tests. --debug disables this option',
                      dest='failed', action="store_true", default=False)
    # default log file: same directory and base name as this script, with .log extension
    default_logger_file = os.path.dirname(__file__)+'/'+os.path.splitext(os.path.basename(__file__))[0]+'.log'
    parent_parser.add_argument('--list-tests', help='List all available tests', dest='list_tests', action="store_true",
                      default=False)
    parent_parser.add_argument('--logger_file', dest='logger_file', default=default_logger_file,
                               help='Set the logger file. By default '+default_logger_file)
    parent_parser.add_argument("-t", '--tenant', dest='tenant_name', default="osm",
                               help="Set the openmano tenant to use for the test. By default 'osm'")
    parent_parser.add_argument('--debug', help='Set logs to debug level', dest='debug', action="store_true")
    parent_parser.add_argument('--timeout', help='Specify the instantiation timeout in seconds. By default 300',
                          dest='timeout', type=int, default=300)
    parent_parser.add_argument('--test', '--tests', help='Specify the tests to run', dest='tests', action="append")
+
    # Each test set is a CLI subcommand; set_defaults(func=...) binds the
    # handler function that the main code dispatches to after parsing
    subparsers = parser.add_subparsers(help='test sets')

    # Deployment test set
    # -------------------
    deploy_parser = subparsers.add_parser('deploy', parents=[parent_parser],
                                          help="test deployment using descriptors at RO_test folder ")
    deploy_parser.set_defaults(func=test_deploy)

    # Mandatory arguments
    mandatory_arguments = deploy_parser.add_argument_group('mandatory arguments')
    mandatory_arguments.add_argument('-d', '--datacenter', required=True, help='Set the datacenter to test')
    mandatory_arguments.add_argument("-i", '--image-name', required=True, dest="image_name",
                                     help='Image name available at datacenter used for the tests')
    mandatory_arguments.add_argument("-n", '--mgmt-net-name', required=True, dest='mgmt_net',
                                     help='Set the vim management network to use for tests')

    # Optional arguments
    deploy_parser.add_argument('-m', '--manual-check', dest='manual', action="store_true", default=False,
                               help='Pause execution once deployed to allow manual checking of the '
                                    'deployed instance scenario')
    deploy_parser.add_argument('-u', '--url', dest='endpoint_url', default='http://localhost:9090/openmano',
                               help="Set the openmano server url. By default 'http://localhost:9090/openmano'")

    # Vimconn test set
    # -------------------
    # NOTE: the same local variable "vimconn_parser" is reused for the vimconn,
    # vim and wim subparsers below; this is safe because each parser object is
    # already registered inside "subparsers" when created
    vimconn_parser = subparsers.add_parser('vimconn', parents=[parent_parser], help="test vimconnector plugin")
    vimconn_parser.set_defaults(func=test_vimconnector)
    # Mandatory arguments
    mandatory_arguments = vimconn_parser.add_argument_group('mandatory arguments')
    mandatory_arguments.add_argument('--vimtype', choices=['vmware', 'aws', 'openstack', 'openvim'], required=True,
                                     help='Set the vimconnector type to test')
    mandatory_arguments.add_argument('-c', '--config', dest='config_param', required=True,
                                    help='Set the vimconnector specific config parameters in dictionary format')
    mandatory_arguments.add_argument('-u', '--url', dest='endpoint_url',required=True, help="Set the vim connector url or Host IP")
    # Optional arguments
    vimconn_parser.add_argument('-i', '--image-path', dest='image_path', help="Provide image path present at RO container")
    vimconn_parser.add_argument('-n', '--image-name', dest='image_name', help="Provide image name for test")
    # TODO add optional arguments for vimconn tests
    # vimconn_parser.add_argument("-i", '--image-name', dest='image_name', help='<HELP>'))
    vimconn_parser.add_argument('-s', '--sriov-net-name', dest='sriov_net_name', help="Provide SRIOV network name for test")

    # Datacenter test set
    # -------------------
    vimconn_parser = subparsers.add_parser('vim', parents=[parent_parser], help="test vim")
    vimconn_parser.set_defaults(func=test_vim)

    # Mandatory arguments
    mandatory_arguments = vimconn_parser.add_argument_group('mandatory arguments')
    mandatory_arguments.add_argument('-d', '--datacenter', required=True, help='Set the datacenter to test')

    # Optional arguments
    vimconn_parser.add_argument('-u', '--url', dest='endpoint_url', default='http://localhost:9090/openmano',
                               help="Set the openmano server url. By default 'http://localhost:9090/openmano'")

    # WIM test set
    # -------------------
    vimconn_parser = subparsers.add_parser('wim', parents=[parent_parser], help="test wim")
    vimconn_parser.set_defaults(func=test_wim)

    # Mandatory arguments
    mandatory_arguments = vimconn_parser.add_argument_group('mandatory arguments')
    mandatory_arguments.add_argument('-d', '--datacenter', required=True, help='Set the datacenter to test')

    # Optional arguments
    vimconn_parser.add_argument('-u', '--url', dest='endpoint_url', default='http://localhost:9090/openmano',
                                help="Set the openmano server url. By default 'http://localhost:9090/openmano'")
+
+    argcomplete.autocomplete(parser)
+    args = parser.parse_args()
+    # print str(args)
+    test_config = {}
+
+    # default logger level is INFO. Options --debug and --failed override this, being --debug prioritary
+    logger_level = 'INFO'
+    if args.debug:
+        logger_level = 'DEBUG'
+    elif args.failed:
+        logger_level = 'WARNING'
+    logger_name = os.path.basename(__file__)
+    test_config["logger_name"] = logger_name
+    logger = logging.getLogger(logger_name)
+    logger.setLevel(logger_level)
+    failfast = args.failfast
+
+    # Configure a logging handler to store in a logging file
+    if args.logger_file:
+        fileHandler = logging.FileHandler(args.logger_file)
+        formatter_fileHandler = logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s')
+        fileHandler.setFormatter(formatter_fileHandler)
+        logger.addHandler(fileHandler)
+
+    # Configure a handler to print to stdout
+    consoleHandler = logging.StreamHandler(sys.stdout)
+    formatter_consoleHandler = logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s')
+    consoleHandler.setFormatter(formatter_consoleHandler)
+    logger.addHandler(consoleHandler)
+
+    logger.debug('Program started with the following arguments: ' + str(args))
+
+    # set test config parameters
+    test_config["timeout"] = args.timeout
+    test_config["test_number"] = 1
+
+    executed, failed = args.func(args)
+
+    # Log summary
+    logger.warning("Total number of tests: {}; Total number of failures/errors: {}".format(executed, failed))
+    sys.exit(1 if failed else 0)
diff --git a/RO/test/test_on_container.sh b/RO/test/test_on_container.sh
new file mode 100755 (executable)
index 0000000..e3400b0
--- /dev/null
@@ -0,0 +1,177 @@
+#!/bin/bash
+
+##
+# Copyright 2017 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of OSM
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+##
+
+# Author: Alfonso Tierno (alfonso.tiernosepulveda@telefonica.com)
+
+description="It creates a new lxc container, installs RO from a concrete commit and executes validation tests.\
+ An openvim in test mode is installed and used to validate"
+
usage(){
    # Print invocation help and the environment variables that drive the test
    echo -e "usage: ${BASH_SOURCE[0]} CONTAINER\n ${description}"
    echo -e "  CONTAINER is the name of the container to be created. By default test1"\
            "Warning! if a container with the same name exists, it will be deleted"
    echo -e "  You must also supply a TEST_RO_COMMIT environmental variable with the git command"\
            "to clone the version under test. It can be copy paste from gerrit. Examples:\n"\
            " TEST_RO_COMMIT='git fetch https://osm.etsi.org/gerrit/osm/RO refs/changes/40/5540/1 && git checkout FETCH_HEAD'\n"\
            " TEST_RO_COMMIT='git checkout v3.0.1'"
    echo -e "  You can provide TEST_RO_GIT_URL, by default https://osm.etsi.org/gerrit/osm/RO is used"
    echo -e "  You can provide TEST_RO_CONTAINER instead of by parameter, by default test1"
    echo -e "  You can provide TEST_RO_CUSTOM, with a command for container customization, by default nothing."
}
+
# show help before checking mandatory variables
[ "$1" = "--help" ] || [ "$1" = "-h" ] && usage && exit 0

# TEST_RO_COMMIT is mandatory; TEST_RO_GIT_URL and TEST_RO_CONTAINER have defaults
[[ -z "$TEST_RO_COMMIT" ]] && echo 'provide a TEST_RO_COMMIT variable. Type --help for more info' >&2 && exit 1
[[ -z "$TEST_RO_GIT_URL" ]] && TEST_RO_GIT_URL="https://osm.etsi.org/gerrit/osm/RO"

[ -n "$1" ] && TEST_RO_CONTAINER="$1"
[[ -z "$TEST_RO_CONTAINER" ]] && TEST_RO_CONTAINER=test1

# name of the instance-scenario created and deleted repeatedly by the tests below
instance_name=3vdu_2vnf
+
function echo_RO_log(){
    # Report a test failure on stderr; the RO log dump is kept commented out
    # for manual debugging sessions
    # echo "LOG DUMP:" >&2 && lxc exec "$TEST_RO_CONTAINER" -- tail -n 150 /var/log/osm/openmano.log >&2
    echo -e "\nFAILED" >&2
}
+
function lxc_exec(){
    # Run "$*" inside the test container with the openmano/openvim environment
    # preset; on any failure dump diagnostics and abort the whole test run
    if ! lxc exec "$TEST_RO_CONTAINER" --env OPENMANO_TENANT=osm --env OPENMANO_DATACENTER=local-openvim \
        --env OPENVIM_TENANT="$OPENVIM_TENANT" -- bash -c "$*"
    then
        echo "ERROR on command '$*'" >&2
        echo_RO_log
        exit 1
    fi
}
+
function wait_until_deleted(){
    # Poll openvim (about 90 iterations, 1s apart) until no VM nor net of the
    # test instance remains; abort the run on timeout
    wait_active=0
    while lxc_exec RO/test/local/openvim/openvim vm-list | grep -q -e ${instance_name} ||
          lxc_exec RO/test/local/openvim/openvim net-list | grep -q -e ${instance_name}
    do
        echo -n "."
        [ $wait_active -gt 90 ] &&  echo "timeout waiting VM and nets deleted at VIM" >&2 && echo_RO_log && exit 1
        wait_active=$((wait_active + 1))
        sleep 1
    done
    echo
}
+
# fresh container: remove any leftover with the same name, then launch and prepare it
lxc delete "$TEST_RO_CONTAINER" --force 2>/dev/null && echo "container '$TEST_RO_CONTAINER' deleted"
lxc launch ubuntu:16.04 "$TEST_RO_CONTAINER"
sleep 10
[[ -n "$TEST_RO_CUSTOM" ]] && ${TEST_RO_CUSTOM}
lxc_exec ifconfig eth0 mtu 1446  # Avoid problems when inside an openstack VM that normally limits MTU to this value
lxc_exec git clone "$TEST_RO_GIT_URL"
lxc_exec git -C RO status
lxc_exec "cd RO && $TEST_RO_COMMIT"

# TEST INSTALL
lxc_exec RO/scripts/install-openmano.sh --noclone --force -q --updatedb -b master
sleep 10
lxc_exec openmano tenant-create osm
lxc_exec openmano tenant-list

# TEST database migration (downgrade to version 20 and back, twice)
lxc_exec ./RO/database_utils/migrate_mano_db.sh 20
lxc_exec ./RO/database_utils/migrate_mano_db.sh
lxc_exec ./RO/database_utils/migrate_mano_db.sh 20
lxc_exec ./RO/database_utils/migrate_mano_db.sh

# TEST instantiate with a fake local openvim
lxc_exec ./RO/test/basictest.sh -f --insert-bashrc --install-openvim reset add-openvim create delete


# TEST instantiate with a fake local openvim 2
lxc_exec ./RO/test/test_RO.py deploy -n mgmt -t osm -i cirros034 -d local-openvim --timeout=30 --failfast
lxc_exec ./RO/test/test_RO.py vim  -t osm  -d local-openvim --timeout=30 --failfast

sleep 10
echo "TEST service restart in the middle of a instantiation/deletion"
# keep only the first whitespace-separated token (the openvim tenant uuid)
OPENVIM_TENANT=`lxc_exec RO/test/local/openvim/openvim tenant-list`
OPENVIM_TENANT=${OPENVIM_TENANT%% *}

lxc_exec openmano vnf-create RO/vnfs/examples/v3_3vdu_vnfd.yaml --image-name=cirros034
lxc_exec openmano scenario-create RO/scenarios/examples/v3_3vdu_2vnf_nsd.yaml
wait_until_deleted
test_number=0
# Repeat the service-restart resilience scenario 5 times
while [ $test_number -lt 5 ] ; do
    echo test ${test_number}.0 test instantiation recovering
    # stop osm-ro right after launching the instantiation to force recovery on restart
    lxc_exec openmano instance-scenario-create --name ${instance_name} --scenario osm_id=3vdu_2vnf_nsd";"service osm-ro stop
    sleep 5
    lxc_exec service osm-ro start
    sleep 10
    # wait until all VM are active
    wait_active=0
    while [ `lxc_exec openmano instance-scenario-list ${instance_name} | grep ACTIVE | wc -l` -lt 7 ] ; do
        echo -n "."
        [ $wait_active -gt 90 ] &&  echo "timeout waiting VM active" >&2 && echo_RO_log && exit 1
        wait_active=$((wait_active + 1))
        sleep 1
    done
    echo

    # Due to race condition the VIM request can be processed without getting the response by RO
    # resulting in having some VM or net at VIM not registered by RO. If this is the case need to be deleted manually
    vim_vms=`lxc_exec RO/test/local/openvim/openvim vm-list | grep ${instance_name} | awk '{print $1}'`
    for vim_vm in $vim_vms ; do
        if ! lxc_exec openmano instance-scenario-list ${instance_name} | grep -q $vim_vm ; then
            echo deleting VIM vm $vim_vm
            lxc_exec RO/test/local/openvim/openvim vm-delete -f $vim_vm
        fi
    done
    vim_nets=`lxc_exec RO/test/local/openvim/openvim net-list | grep ${instance_name} | awk '{print $1}'`
    for vim_net in $vim_nets ; do
        if ! lxc_exec openmano instance-scenario-list ${instance_name} | grep -q $vim_net ; then
            echo deleting VIM net $vim_net
            lxc_exec RO/test/local/openvim/openvim net-delete -f $vim_net
        fi
    done

    # delete first VIM VM and wait until RO detects it
    echo test ${test_number}.1 test refresh VM VIM status deleted
    OPENVIM_VM=`lxc_exec RO/test/local/openvim/openvim vm-list`
    OPENVIM_VM=${OPENVIM_VM%% *}
    lxc_exec RO/test/local/openvim/openvim vm-delete -f $OPENVIM_VM
    wait_active=0
    while ! lxc_exec openmano instance-scenario-list ${instance_name} | grep -q DELETED ; do
        echo -n "."
        [ $wait_active -gt 90 ] &&  echo "timeout waiting RO get VM status as DELETED" >&2 && echo_RO_log && exit 1
        wait_active=$((wait_active + 1))
        sleep 1
    done
    echo

    # TEST service restart in the middle of an instantiation deletion
    echo test ${test_number}.2 test instantiation deletion recovering
    lxc_exec openmano instance-scenario-delete ${instance_name} -f";"service osm-ro stop
    sleep 5
    lxc_exec service osm-ro start
    sleep 10
    # wait until all VM are deleted at VIM
    wait_until_deleted

    test_number=$((test_number + 1))
done
echo "DONE"
+
+
diff --git a/RO/test/test_openmanocli.sh b/RO/test/test_openmanocli.sh
new file mode 100755 (executable)
index 0000000..0490467
--- /dev/null
@@ -0,0 +1,207 @@
+#!/bin/bash
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+#This script can be used as a basic test of openmano.
+#WARNING: It destroy the database content
+
+
function usage(){
    # Print script usage; note the trailing backslashes join all the sentences
    # into the arguments of a single echo
    echo -e "usage: ${BASH_SOURCE[0]} [OPTIONS] <action>\n  test openmano with fake tenant, datacenters, etc."\
            "It assumes that you have configured openmano cli with HOST,PORT,TENANT with environment variables"\
            "If not, it will use by default localhost:9080 and creates a new TENANT"
    echo -e "    -h --help        shows this help"
}
+
function is_valid_uuid(){
    # Succeed (return 0) only when $1 is a lowercase hexadecimal 8-4-4-4-12 UUID
    local uuid_pattern='^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$'
    if echo "$1" | grep -q -E "$uuid_pattern" ; then
        return 0
    else
        return 1
    fi
}
+
# resolve this script's directory and the openmano root directory above it
DIRNAME=$(dirname $(readlink -f ${BASH_SOURCE[0]}))
DIRmano=$(dirname $DIRNAME)
DIRscript=${DIRmano}/scripts

#detect paths of executables, preceding the relative paths
openmano=openmano && [[ -x "${DIRmano}/openmano" ]] && openmano="${DIRmano}/openmano"
service_openmano=service-openmano && [[ -x "$DIRscript/service-openmano" ]] &&
    service_openmano="$DIRscript/service-openmano"
initopenvim="initopenvim"
openvim="openvim"
+
function _exit()
{
    # Delete every item recorded in $ToDelete (most recent first, entries are
    # "command:uuid") and finish with the worst status seen: $1, or 1 if any
    # cleanup command failed.
    EXIT_STATUS=$1
    for item in $ToDelete
    do
        command=${item%%:*}
        uuid=${item#*:}
        # datacenter-detach does not accept the -f force flag
        [[ $command == "datacenter-detach" ]] && force="" || force=-f
        printf "%-50s" "$command $uuid:"
        ! $openmano $command $uuid $force >> /dev/null && echo FAIL && EXIT_STATUS=1 || echo OK
    done
    # when sourced, return instead of exiting; propagate the UPDATED status
    # (returning "$1" here would hide cleanup failures from the caller)
    [[ ${BASH_SOURCE[0]} != $0 ]] && return $EXIT_STATUS || exit $EXIT_STATUS
}
+
+
# process options
# get-options.sh populates the $option_* variables from the command line
source ${DIRscript}/get-options.sh "force:-f help:h insert-bashrc init-openvim:initopenvim install-openvim screen" \
                $* || _exit 1

# help
[ -n "$option_help" ] && usage && _exit 0


# ToDelete accumulates "command:uuid" cleanup entries replayed by _exit
ToDelete=""
# fake entity names used only for CRUD smoke tests against the CLI
DCs="dc-fake1-openstack dc-fake2-openvim" #dc-fake3-vmware
# NOTE(review): "fake-tenand2" looks like a typo for "fake-tenant2"; it is only
# a fixture name so it is harmless — confirm before renaming
Ts="fake-tenant1 fake-tenand2"
SDNs="sdn-fake1-opendaylight sdn-fake2-floodlight sdn-fake3-onos"
+
# CRUD over tenants: create the fake tenants, remember them for cleanup and
# export the first one as the default OPENMANO_TENANT
for T in $Ts
do
    printf "%-50s" "Creating fake tenant '$T':"
    ! result=`$openmano tenant-create "$T"` && echo FAIL && echo "    $result" && _exit 1
    tenant=`echo $result |gawk '{print $1}'`
    ! is_valid_uuid $tenant && echo "FAIL" && echo "    $result" && _exit 1
    echo $tenant
    ToDelete="tenant-delete:$tenant $ToDelete"
    [[ -z "$OPENMANO_TENANT" ]] && export OPENMANO_TENANT=$tenant
done

index=0
# CRUD over datacenters: create each one, attach the osm tenant, keep uuids in DC1, DC2, ...
for DC in $DCs
do
    index=$((index+1))
    printf "%-50s" "Creating datacenter '$DC':"
    ! result=`$openmano datacenter-create "$DC" "http://$DC/v2.0" --type=${DC##*-} --config='{insecure: True}'` &&
        echo FAIL && echo "    $result" && _exit 1
    datacenter=`echo $result |gawk '{print $1}'`
    ! is_valid_uuid $datacenter && echo "FAIL" && echo "    $result" && _exit 1
    echo $datacenter
    eval DC${index}=$datacenter
    ToDelete="datacenter-delete:$datacenter $ToDelete"
    [[ -z "$datacenter_empty" ]] && datacenter_empty=datacenter

    printf "%-50s" "Attaching openmano tenant to the datacenter:"
    ! result=`$openmano datacenter-attach "$DC" --vim-tenant-name=osm --config='{insecure: False}'` &&
        echo FAIL && echo "    $result" && _exit 1
    ToDelete="datacenter-detach:$datacenter $ToDelete"
    echo OK
done

printf "%-50s" "Datacenter list:"
! result=`$openmano datacenter-list` &&
    echo  "FAIL" && echo "    $result" && _exit 1
# exercise all verbosity levels of the listing
for verbose in "" -v -vv -vvv
do
    ! result=`$openmano datacenter-list "$DC" $verbose` &&
        echo  "FAIL" && echo "    $result" && _exit 1
done
echo OK

# CRUD over SDN controllers, each with a distinct datapath id (dpid)
dpid_prefix=55:56:57:58:59:60:61:0
dpid_sufix=0
for SDN in $SDNs
do
    printf "%-50s" "Creating SDN controller '$SDN':"
    ! result=`$openmano sdn-controller-create "$SDN" --ip 4.5.6.7 --port 80 --type=${SDN##*-} \
        --user user --passwd p --dpid=${dpid_prefix}${dpid_sufix}` && echo "FAIL" && echo "    $result" && _exit 1
    sdn=`echo $result |gawk '{print $1}'`
    #check a valid uuid is obtained
    ! is_valid_uuid $sdn && echo "FAIL" && echo "    $result" && _exit 1
    echo $sdn
    ToDelete="sdn-controller-delete:$sdn $ToDelete"
    dpid_sufix=$((dpid_sufix+1))

done
# edit every mutable field of the last created SDN controller
printf "%-50s" "Edit SDN-controller:"
for edit in user=u password=p ip=5.6.6.7 port=81 name=name dpid=45:55:54:45:44:44:55:67
do
    ! result=`$openmano sdn-controller-edit $sdn -f --"${edit}"` &&
        echo  "FAIL" && echo "    $result" && _exit 1
done
echo OK

printf "%-50s" "SDN-controller list:"
! result=`$openmano sdn-controller-list` &&
    echo  "FAIL" && echo "    $result" && _exit 1
for verbose in "" -v -vv -vvv
do
    ! result=`$openmano sdn-controller-list "$sdn" $verbose` &&
        echo  "FAIL" && echo "    $result" && _exit 1
done
echo OK

# Attach/detach the SDN controller to a datacenter and exercise the
# port-mapping set/clear/list cycle several times
printf "%-50s" "Add sdn to datacenter:"
! result=`$openmano datacenter-edit -f $DC --sdn-controller $SDN` &&
    echo "FAIL" && echo "    $result" && _exit 1 || echo OK

printf "%-50s" "Clear Port mapping:"
! result=`$openmano datacenter-sdn-port-mapping-clear -f $DC` &&
    echo "FAIL" && echo "    $result" && _exit 1 || echo OK

printf "%-50s" "Set Port mapping:"
! result=`$openmano datacenter-sdn-port-mapping-set -f $DC ${DIRmano}/sdn/sdn_port_mapping.yaml` &&
    echo "FAIL" && echo "    $result" && _exit 1 || echo OK

printf "%-50s" "List Port mapping:"
for verbose in "" -v -vv -vvv
do
    ! result=`$openmano datacenter-sdn-port-mapping-list "$DC" $verbose` &&
        echo  "FAIL" && echo "    $result" && _exit 1
done
echo OK

printf "%-50s" "Set again Port mapping:"
! result=`$openmano datacenter-sdn-port-mapping-set -f $DC ${DIRmano}/sdn/sdn_port_mapping.yaml` &&
    echo "FAIL" && echo "    $result" && _exit 1 || echo OK

printf "%-50s" "Clear again Port mapping:"
! result=`$openmano datacenter-sdn-port-mapping-clear -f $DC` &&
    echo "FAIL" && echo "    $result" && _exit 1 || echo OK

printf "%-50s" "Set again Port mapping:"
! result=`$openmano datacenter-sdn-port-mapping-set -f $DC ${DIRmano}/sdn/sdn_port_mapping.yaml` &&
    echo "FAIL" && echo "    $result" && _exit 1 || echo OK

printf "%-50s" "Remove datacenter sdn:"
! result=`$openmano datacenter-edit -f $DC --sdn-controller null` &&
    echo "FAIL" && echo "    $result" && _exit 1 || echo OK

printf "%-50s" "Negative list port mapping:"
# listing must FAIL once the sdn-controller is detached (note: no leading "!")
result=`$openmano datacenter-sdn-port-mapping-list $DC` &&
    echo "FAIL" && echo "    $result" && _exit 1 || echo OK

printf "%-50s" "Add again datacenter sdn:"
! result=`$openmano datacenter-edit -f $DC --sdn-controller $SDN` &&
    echo "FAIL" && echo "    $result" && _exit 1 || echo OK

printf "%-50s" "Empty list port mapping:"
! [[ `$openmano datacenter-sdn-port-mapping-list $DC | wc -l` -eq 6 ]] &&
    echo "FAIL" && _exit 1 || echo OK

printf "%-50s" "Set again Port mapping:"
! result=`$openmano datacenter-sdn-port-mapping-set -f $DC ${DIRmano}/sdn/sdn_port_mapping.yaml` &&
    echo "FAIL" && echo "    $result" && _exit 1 || echo OK

_exit 0
+
diff --git a/RO/test/test_openmanoclient.py b/RO/test/test_openmanoclient.py
new file mode 100755 (executable)
index 0000000..6bdd67c
--- /dev/null
@@ -0,0 +1,505 @@
#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+'''
+Module to test openmanoclient class and indirectly the whole openmano
+It allows both python 2 and python 3
+'''
+__author__="Alfonso Tierno"
+__date__ ="$09-Mar-2016 09:09:48$"
+__version__="0.0.2"
+version_date="May 2016"
+
+import logging
+import imp 
+        
+
+
+def _get_random_name(maxLength):
+    '''generates a string with random craracters from space (ASCCI 32) to ~(ASCCI 126)
+    with a random length up to maxLength
+    '''
+    long_name = "testing up to {} size name: ".format(maxLength) 
+    #long_name += ''.join(chr(random.randint(32,126)) for _ in range(random.randint(20, maxLength-len(long_name))))
+    long_name += ''.join(random.choice('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 ') for _ in range(20, maxLength-len(long_name)))
+    return long_name
+
+
+if __name__=="__main__":
+    import getopt
+    #import os
+    import sys
+    
+
+
+    usage =\
+    """Make a test against an openmano server.\nUsage: test_openmanoclient [options]
+    -v|--verbose: prints more info in the test
+    --version:    shows current version
+    -h|--help:    shows this help
+    -d|--debug:   set logs to debug level
+    -t|--tenant:  set the tenant name to test. By default creates one
+    --datacenter: set the datacenter name to test. By default creates one at http://localhost:9080/openvim
+    -u|--url:     set the openmano server url. By default 'http://localhost:9090/openmano'
+    --image:      use this image path for testing a VNF. By default a fake one is generated, valid for VIM in test mode'
+    """
+
+    #import openmanoclient from relative path
+    module_info = imp.find_module("openmanoclient", [".."] )
+    Client = imp.load_module("Client", *module_info)
+    
+    streamformat = "%(asctime)s %(name)s %(levelname)s: %(message)s"
+    logging.basicConfig(format=streamformat)
+    try:
+        opts, args = getopt.getopt(sys.argv[1:], "t:u:dhv", ["url=", "tenant=", "debug", "help", "version", "verbose", "datacenter=", "image="])
+    except getopt.GetoptError as err:
+        print ("Error: {}\n Try '{} --help' for more information".format(str(err), sys.argv[0]))
+        sys.exit(2)
+
+    debug = False
+    verbose = False
+    url = "http://localhost:9090/openmano"
+    to_delete_list=[]
+    test_tenant = None
+    test_datacenter = None
+    test_vim_tenant = None
+    test_image = None
+    for o, a in opts:
+        if o in ("-v", "--verbose"):
+            verbose = True
+        elif o in ("--version"):
+            print ("{} version".format(sys.argv[0]), __version__, version_date)
+            print ("(c) Copyright Telefonica")
+            sys.exit()
+        elif o in ("-h", "--help"):
+            print(usage)
+            sys.exit()
+        elif o in ("-d", "--debug"):
+            debug = True
+        elif o in ("-u", "--url"):
+            url = a
+        elif o in ("-t", "--tenant"):
+            test_tenant = a 
+        elif o in ("--datacenter"):
+            test_datacenter = a 
+        elif o in ("--image"):
+            test_image = a 
+        else:
+            assert False, "Unhandled option"
+
+    
+    
+    client = Client.openmanoclient(
+                            endpoint_url=url, 
+                            tenant_name=test_tenant,
+                            datacenter_name = test_datacenter,
+                            debug = debug)
+
+    import random
+    test_number=1
+    
+    #TENANTS
+    print("  {}. TEST create_tenant".format(test_number))
+    test_number += 1
+    long_name = _get_random_name(60)
+
+    tenant = client.create_tenant(name=long_name, description=long_name)
+    if verbose: print(tenant)
+
+    print("  {}. TEST list_tenants".format(test_number))
+    test_number += 1
+    tenants = client.list_tenants()
+    if verbose: print(tenants)
+    
+    print("  {}. TEST list_tenans filter by name".format(test_number))
+    test_number += 1
+    tenants_ = client.list_tenants(name=long_name)
+    if not tenants_["tenants"]:
+        raise Exception("Text error, no TENANT found with name")
+    if verbose: print(tenants_)
+    
+    print("  {}. TEST get_tenant by UUID".format(test_number))
+    test_number += 1
+    tenant = client.get_tenant(uuid=tenants_["tenants"][0]["uuid"])
+    if verbose: print(tenant)
+        
+    print("  {}. TEST delete_tenant by name".format(test_number))
+    test_number += 1
+    tenant = client.delete_tenant(name = long_name)
+    if verbose: print(tenant)
+    
+    if not test_tenant:
+        print("  {}. TEST create_tenant for remaining tests".format(test_number))
+        test_number += 1
+        test_tenant = "test-tenant "+\
+        ''.join(random.choice('abcdefghijklmnopqrstuvwxyz') for _ in range(40))
+        tenant = client.create_tenant(name = test_tenant)
+        if verbose: print(tenant)
+        client["tenant_name"] = test_tenant
+        
+        to_delete_list.insert(0,{"item": "tenant", "function": client.delete_tenant, "params":{"name": test_tenant} })
+
+    #DATACENTERS
+    print("  {}. TEST create_datacenter".format(test_number))
+    test_number += 1
+    long_name = _get_random_name(60)
+
+    datacenter = client.create_datacenter(name=long_name, vim_url="http://fakeurl/fake")
+    if verbose: print(datacenter)
+
+    print("  {}. TEST list_datacenters".format(test_number))
+    test_number += 1
+    datacenters = client.list_datacenters(all_tenants=True)
+    if verbose: print(datacenters)
+    
+    print("  {}. TEST list_tenans filter by name".format(test_number))
+    test_number += 1
+    datacenters_ = client.list_datacenters(all_tenants=True, name=long_name)
+    if not datacenters_["datacenters"]:
+        raise Exception("Text error, no TENANT found with name")
+    if verbose: print(datacenters_)
+    
+    print("  {}. TEST get_datacenter by UUID".format(test_number))
+    test_number += 1
+    datacenter = client.get_datacenter(uuid=datacenters_["datacenters"][0]["uuid"], all_tenants=True)
+    if verbose: print(datacenter)
+        
+    print("  {}. TEST delete_datacenter by name".format(test_number))
+    test_number += 1
+    datacenter = client.delete_datacenter(name=long_name)
+    if verbose: print(datacenter)
+    
+    if not test_datacenter:
+        print("  {}. TEST create_datacenter for remaining tests".format(test_number))
+        test_number += 1
+        test_datacenter = "test-datacenter "+\
+        ''.join(random.choice('abcdefghijklmnopqrstuvwxyz') for _ in range(40))
+        datacenter = client.create_datacenter(name=test_datacenter, vim_url="http://127.0.0.1:9080/openvim")
+        if verbose: print(datacenter)
+        client["datacenter_name"] = test_datacenter
+        to_delete_list.insert(0,{"item": "datacenter", "function": client.delete_datacenter,
+                                  "params":{
+                                        "name": test_datacenter
+                                    } 
+                                 })
+
+        print("  {}. TEST datacenter new tenenat".format(test_number))
+        test_number += 1
+        test_vim_tenant = "test-vimtenant "+\
+        ''.join(random.choice('abcdefghijklmnopqrstuvwxyz') for _ in range(40))
+        vim_tenant = client.vim_action("create", "tenants", datacenter_name=test_datacenter, all_tenants=True, name=test_vim_tenant)
+        if verbose: print(vim_tenant)
+        client["datacenter_name"] = test_datacenter
+        to_delete_list.insert(0,{"item": "vim_tenant", 
+                                 "function": client.vim_action,
+                                  "params":{
+                                            "action":"delete",
+                                            "item":"tenants",
+                                            "datacenter_name": test_datacenter,
+                                            "all_tenants": True,
+                                            "uuid": vim_tenant["tenant"]["id"]
+                                            }
+                                 })
+
+        print("  {}. TEST datacenter attach".format(test_number))
+        test_number += 1
+        datacenter = client.attach_datacenter(name=test_datacenter, vim_tenant_name=test_vim_tenant)
+        if verbose: print(datacenter)
+        client["datacenter_name"] = test_datacenter
+        to_delete_list.insert(0,{"item": "datacenter-detach", "function": client.detach_datacenter, "params":{"name": test_datacenter} })
+
+        client["datacenter_name"] = test_datacenter
+
+        # WIMs
+        print("  {}. TEST create_wim".format(test_number))
+        test_number += 1
+        long_name = _get_random_name(60)
+
+        wim = client.create_wim(name=long_name, wim_url="http://fakeurl/fake")
+        if verbose: print(wim)
+
+        print("  {}. TEST list_wims".format(test_number))
+        test_number += 1
+        wims = client.list_wims(all_tenants=True)
+        if verbose: print(wims)
+
+        print("  {}. TEST list_tenans filter by name".format(test_number))
+        test_number += 1
+        wims_ = client.list_wims(all_tenants=True, name=long_name)
+        if not wims_["wims"]:
+            raise Exception("Text error, no TENANT found with name")
+        if verbose: print(wims_)
+
+        print("  {}. TEST get_wim by UUID".format(test_number))
+        test_number += 1
+        wim = client.get_wim(uuid=wims_["wims"][0]["uuid"], all_tenants=True)
+        if verbose: print(wim)
+
+        print("  {}. TEST delete_wim by name".format(test_number))
+        test_number += 1
+        wim = client.delete_wim(name=long_name)
+        if verbose: print(wim)
+
+        print("  {}. TEST create_wim for remaining tests".format(test_number))
+        test_number += 1
+        test_wim = "test-wim " + \
+                          ''.join(random.choice('abcdefghijklmnopqrstuvwxyz') for _ in range(40))
+        wim = client.create_wim(name=test_wim, vim_url="http://127.0.0.1:9080/odl")
+        if verbose: print(wim)
+        to_delete_list.insert(0,
+                              {
+                                    "item": "wim", "function": client.delete_wim,
+                                    "params":
+                                        {
+                                                "name": test_wim
+                                        }
+                                })
+
+        test_wim_tenant = "test-wimtenant " + \
+                           ''.join(random.choice('abcdefghijklmnopqrstuvwxyz') for _ in range(40))
+
+        # print("  {}. TEST datacenter new tenenat".format(test_number))
+        # test_number += 1
+        # test_vim_tenant = "test-vimtenant " + \
+        #                   ''.join(random.choice('abcdefghijklmnopqrstuvwxyz') for _ in range(40))
+        # vim_tenant = client.vim_action("create", "tenants", datacenter_name=test_datacenter, all_tenants=True,
+        #                                name=test_vim_tenant)
+        # if verbose: print(vim_tenant)
+        # client["datacenter_name"] = test_datacenter
+        # to_delete_list.insert(0, {"item": "vim_tenant",
+        #                           "function": client.vim_action,
+        #                           "params": {
+        #                               "action": "delete",
+        #                               "item": "tenants",
+        #                               "datacenter_name": test_datacenter,
+        #                               "all_tenants": True,
+        #                               "uuid": vim_tenant["tenant"]["id"]
+        #                           }
+        #                           })
+
+        print("  {}. TEST wim attach".format(test_number))
+        test_number += 1
+        wim = client.attach_wim(name=test_wim, wim_tenant_name=test_wim_tenant)
+        if verbose: print(wim)
+        to_delete_list.insert(0, {"item": "wim-detach", "function": client.detach_wim,
+                                  "params": {"name": test_wim}})
+    
+    #VIM_ACTIONS
+    print("  {}. TEST create_VIM_tenant".format(test_number))
+    test_number += 1
+    long_name = _get_random_name(60)
+
+    tenant = client.vim_action("create", "tenants", name=long_name)
+    if verbose: print(tenant)
+    tenant_uuid = tenant["tenant"]["id"] 
+
+    print("  {}. TEST list_VIM_tenants".format(test_number))
+    test_number += 1
+    tenants = client.vim_action("list", "tenants")
+    if verbose: print(tenants)
+    
+    print("  {}. TEST get_VIM_tenant by UUID".format(test_number))
+    test_number += 1
+    tenant = client.vim_action("show", "tenants", uuid=tenant_uuid)
+    if verbose: print(tenant)
+        
+    print("  {}. TEST delete_VIM_tenant by id".format(test_number))
+    test_number += 1
+    tenant = client.vim_action("delete", "tenants", uuid = tenant_uuid)
+    if verbose: print(tenant)
+    
+    print("  {}. TEST create_VIM_network".format(test_number))
+    test_number += 1
+    long_name = _get_random_name(60)
+
+    network = client.vim_action("create", "networks", name=long_name)
+    if verbose: print(network)
+    network_uuid = network["network"]["id"] 
+
+    print("  {}. TEST list_VIM_networks".format(test_number))
+    test_number += 1
+    networks = client.vim_action("list", "networks")
+    if verbose: print(networks)
+    
+    print("  {}. TEST get_VIM_network by UUID".format(test_number))
+    test_number += 1
+    network = client.vim_action("show", "networks", uuid=network_uuid)
+    if verbose: print(network)
+        
+    print("  {}. TEST delete_VIM_network by id".format(test_number))
+    test_number += 1
+    network = client.vim_action("delete", "networks", uuid = network_uuid)
+    if verbose: print(network)
+    #VNFS
+    print("  {}. TEST create_vnf".format(test_number))
+    test_number += 1
+    test_vnf_name = _get_random_name(255)
+    if test_image:
+        test_vnf_path = test_image
+    else:
+        test_vnf_path = "/random/path/" + "".join(random.choice('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 ') for _ in range(20))
+    
+    vnf_descriptor={'vnf': {'name': test_vnf_name, 
+                                'VNFC': [{'description': _get_random_name(255),
+                                          'name': 'linux-VM',
+                                          'VNFC image': test_vnf_path,
+                                          'ram': 1024,
+                                          'vcpus': 1,
+                                          'bridge-ifaces': [{'name': 'eth0'}]
+                                        }],
+                                'description': _get_random_name(255),
+                                'nets': [], 
+                                'external-connections': [{'name': 'eth0', 
+                                                          'local_iface_name': 'eth0',
+                                                          'VNFC': 'linux-VM',
+                                                          'type': 'bridge'}], 
+                                'public': False}}
+
+    vnf = client.create_vnf(descriptor=vnf_descriptor)
+    if verbose: print(vnf)
+    to_delete_list.insert(0,{"item": "vnf", "function": client.delete_vnf, "params":{"name": test_vnf_name} })
+
+    print("  {}. TEST list_vnfs".format(test_number))
+    test_number += 1
+    vnfs = client.list_vnfs()
+    if verbose: print(vnfs)
+    
+    print("  {}. TEST list_vnfs filter by name".format(test_number))
+    test_number += 1
+    vnfs_ = client.list_vnfs(name=test_vnf_name)
+    if not vnfs_["vnfs"]:
+        raise Exception("Text error, no VNF found with name")
+    if verbose: print(vnfs_)
+    
+    print("  {}. TEST get_vnf by UUID".format(test_number))
+    test_number += 1
+    vnf = client.get_vnf(uuid=vnfs_["vnfs"][0]["uuid"])
+    if verbose: print(vnf)
+
+    #SCENARIOS
+    print("  {}. TEST create_scenario".format(test_number))
+    test_number += 1
+    test_scenario_name = _get_random_name(255)
+    
+    scenario_descriptor={   'schema_version': 2,
+                            'scenario': {
+                                'name': test_scenario_name, 
+                                'description': _get_random_name(255),
+                                'public': True,
+                                'vnfs':{
+                                    'vnf1': {
+                                        'vnf_name': test_vnf_name
+                                    }
+                                },
+                                'networks':{
+                                    'net1':{
+                                        'external': True,
+                                        'interfaces': [
+                                            {'vnf1': 'eth0'}
+                                        ]
+                                    }
+                                }
+                            }
+                        }
+
+    scenario = client.create_scenario(descriptor=scenario_descriptor)
+    if verbose: print(scenario)
+    to_delete_list.insert(0,{"item": "scenario", "function": client.delete_scenario, "params":{"name": test_scenario_name} })
+
+    print("  {}. TEST list_scenarios".format(test_number))
+    test_number += 1
+    scenarios = client.list_scenarios()
+    if verbose: print(scenarios)
+    
+    print("  {}. TEST list_scenarios filter by name".format(test_number))
+    test_number += 1
+    scenarios_ = client.list_scenarios(name=test_scenario_name)
+    if not scenarios_["scenarios"]:
+        raise Exception("Text error, no VNF found with name")
+    if verbose: print(scenarios_)
+    
+    print("  {}. TEST get_scenario by UUID".format(test_number))
+    test_number += 1
+    scenario = client.get_scenario(uuid=scenarios_["scenarios"][0]["uuid"])
+    if verbose: print(scenario)
+
+
+
+    #INSTANCES
+    print("  {}. TEST create_instance".format(test_number))
+    test_number += 1
+    test_instance_name = _get_random_name(255)
+    
+    instance_descriptor={   'schema_version': 2,
+                            'instance': {
+                                'name': test_instance_name, 
+                                'description': _get_random_name(255),
+                                'public': True,
+                                'vnfs':{
+                                    'vnf1': {
+                                        'vnf_name': test_vnf_name
+                                    }
+                                },
+                                'networks':{
+                                    'net1':{
+                                        'external': True,
+                                        'interfaces': [
+                                            {'vnf1': 'eth0'}
+                                        ]
+                                    }
+                                }
+                            }
+                        }
+
+    instance = client.create_instance(scenario_name=test_scenario_name, name=test_instance_name )
+    if verbose: print(instance)
+    to_delete_list.insert(0,{"item": "instance", "function": client.delete_instance, "params":{"name": test_instance_name} })
+
+    print("  {}. TEST list_instances".format(test_number))
+    test_number += 1
+    instances = client.list_instances()
+    if verbose: print(instances)
+    
+    print("  {}. TEST list_instances filter by name".format(test_number))
+    test_number += 1
+    instances_ = client.list_instances(name=test_instance_name)
+    if not instances_["instances"]:
+        raise Exception("Text error, no VNF found with name")
+    if verbose: print(instances_)
+    
+    print("  {}. TEST get_instance by UUID".format(test_number))
+    test_number += 1
+    instance = client.get_instance(uuid=instances_["instances"][0]["uuid"])
+    if verbose: print(instance)
+
+
+
+
+    #DELETE Create things
+    for item in to_delete_list:
+        print("  {}. TEST delete_{}".format(test_number, item["item"]))
+        test_number += 1
+        response = item["function"](**item["params"]) 
+        if verbose: print(response)
+    
diff --git a/RO/test/test_osconnector.py b/RO/test/test_osconnector.py
new file mode 100755 (executable)
index 0000000..3619ae4
--- /dev/null
@@ -0,0 +1,274 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+'''
+test_osconnector.py makes a test over osconnector.py (openstack connector)
+credentials must be provided with environment bash variables or arguments
+'''
+__author__="Alfonso Tierno, Gerardo Garcia"
+__date__ ="$22-jun-2014 11:19:29$"
+
+
+import os
+import sys
+import getopt
+#import yaml
+#from jsonschema import validate as js_v, exceptions as js_e
+
+#load osconnector, insert openmano directory in the path
+r=sys.argv[0].rfind('/')
+if r<0:
+    osconnector_path=".."
+else:
+    osconnector_path=sys.argv[0][:r+1]+".."
+sys.path.insert(0, osconnector_path)
+#sys.path.insert(0, '/home/atierno/workspace/openmano/openmano')
+import osconnector
+
+version="0.1"
+
def usage():
    """Print the command-line help of this test script to stdout."""
    print("Usage: ", sys.argv[0], "[options]")
    print("  -v|--version            openstack version (by default 2)")
    print("  -u|--username USER      user to authenticate (by default bash:OS_USERNAME)")
    print("  -p|--password PASSWD    password to authenticate (by default bash:OS_PASSWORD)")
    print("  -U|--auth_url URL       url of authentication over keystone (by default bash:OS_AUTH_URL)")
    # fixed copy-paste error: -t sets the tenant name, not a password
    print("  -t|--tenant_name TENANT tenant name to authenticate (by default bash:OS_TENANT_NAME)")
    print("  -i|--image IMAGE        use this local path or url for loading image (by default cirros)")
    print("  --skip-admin-tests      skip tests that requires administrative permissions, like create tenants")
    print("  -h|--help               shows this help")
+
def delete_items():
    """Roll back every item recorded in ``rollback_list``, newest first.

    Each entry is a tuple (item_kind, name, id_). "creds" entries restore a
    credential on the global ``myvim`` connector; every other kind maps to
    the corresponding delete method of the connector.
    """
    global myvim
    global rollback_list
    print("Making rollback, deleting items")
    # undo in reverse creation order
    for item, name, id_ in reversed(rollback_list):
        if item == "creds":
            # BUGFIX: the py2->py3 conversion left ".ljust(50)," outside print(),
            # which raised AttributeError on print()'s None return value
            print("changing credentials {}='{}'".format(name, id_).ljust(50), end="")
        else:
            print("deleting {} '{}'".format(item, name).ljust(50), end="")
        sys.stdout.flush()
        if item == "flavor":
            result, message = myvim.delete_tenant_flavor(id_)
        elif item == "image":
            result, message = myvim.delete_tenant_image(id_)
        elif item == "tenant":
            result, message = myvim.delete_tenant(id_)
        elif item == "user":
            result, message = myvim.delete_user(id_)
        elif item == "network":
            result, message = myvim.delete_tenant_network(id_)
        elif item == "vm":
            result, message = myvim.delete_tenant_vminstance(id_)
        elif item == "creds":
            try:
                myvim[name] = id_
                result = 1
            except Exception as e:
                result = -1
                message = "  " + str(type(e))[6:-1] + ": " + str(e)
        else:
            print("Internal error unknown item rollback {},{},{}".format(item, name, id_))
            continue
        if result < 0:
            print(" Fail")
            print("  VIM response:", message)
            continue
        else:
            print(" Ok")
+
if __name__ == "__main__":
    # NOTE: "global" at module level is a no-op; kept from the original source
    global myvim
    global rollback_list
    # print("(c) Copyright Telefonica")
    rollback_list = []
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hv:u:U:p:t:i:",
                 ["username=", "help", "version=", "password=", "tenant=", "url=","skip-admin-tests",'image='])
    except getopt.GetoptError as err:
        # print help information and exit
        print("Error:", err)  # will print something like "option -a not recognized"
        usage()
        sys.exit(2)

    # credentials default to the OS_* environment variables (OpenStack convention)
    creds = {}
    creds['version'] = os.environ.get('OS_VERSION', '2')
    creds['username'] = os.environ.get('OS_USERNAME')
    creds['password'] = os.environ.get('OS_PASSWORD')
    creds['auth_url'] = os.environ.get('OS_AUTH_URL')
    creds['tenant_name'] = os.environ.get('OS_TENANT_NAME')
    skip_admin_tests = False
    image_path = "http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img"
    for o, a in opts:
        if o in ("-h", "--help"):
            usage()
            sys.exit()
        elif o in ("-v", "--version"):
            creds['version'] = a
        elif o in ("-u", "--username"):
            creds['username'] = a
        elif o in ("-p", "--password"):
            creds['password'] = a
        elif o in ("-U", "--auth_url"):
            creds['auth_url'] = a
        elif o in ("-t", "--tenant_name"):
            creds['tenant_name'] = a
        elif o in ("-i", "--image"):
            image_path = a
        elif o == "--skip-admin-tests":
            skip_admin_tests = True
        else:
            assert False, "Unhandled option"
    # use "is None", not "== None", for the None check (PEP 8)
    if creds['auth_url'] is None:
        print("you must provide openstack url with -U or bash OS_AUTH_URL")
        sys.exit()
    print("creds:", creds)
+
+    try:
+        print('load osconnector class'.ljust(50))
+        sys.stdout.flush()
+        try:
+            myvim=osconnector.osconnector(uuid=None, name='test-openstack', tenant=creds['tenant_name'], 
+                url=creds['auth_url'], url_admin=None,
+                user=creds['username'], passwd=creds['password'],
+                debug = False, config={'network_vlan_ranges':'physnet_sriov'} )
+            print(" Ok")
+        except Exception as e:
+            print(" Fail")
+            print(str(type(e))[6:-1] + ": "+  str(e))
+            exit(-1)
+        
+        if not skip_admin_tests:
+            tenant_name="tos-tenant"
+            print("creating new tenant '{}'".format(tenant_name)).ljust(50),
+            sys.stdout.flush()
+            result,new_tenant=myvim.new_tenant(tenant_name, "test tenant_description, trying a long description to get the limit. 2 trying a long description to get the limit. 3. trying a long description to get the limit.")
+            if result<0:
+                print(" Fail")
+                print("  you can skip tenant creation with param'--skip-admin-tests'")
+                print("  VIM response:", new_tenant)
+                exit(-1)
+            else:
+                print(" Ok", new_tenant)
+                rollback_list.append(("tenant",tenant_name,new_tenant))
+
+            user_name="tos-user"
+            print("creating new user '{}'".format(user_name).ljust(50), end="")
+            sys.stdout.flush()
+            result,new_user=myvim.new_user(user_name, user_name, tenant_id=new_tenant)
+            if result<0:
+                print(" Fail")
+                print("  VIM response:", new_user)
+                exit(-1)
+            else:
+                print(" Ok", new_user)
+                rollback_list.append(("user",user_name,new_user))
+                    
+        name="tos-fl1"
+        print("creating new flavor '{}'".format(name)).ljust(50),
+        sys.stdout.flush()
+        flavor={}
+        flavor['name']=name
+        result,new_flavor1=myvim.new_tenant_flavor(flavor, True)
+        if result<0:
+            print(" Fail")
+            print("  VIM response:", new_flavor1)
+            exit(-1)
+        else:
+            print(" Ok", new_flavor1)
+            rollback_list.append(("flavor",name,new_flavor1))
+            
+        name="tos-cirros"
+        print("creating new image '{}'".format(name).ljust(50))
+        sys.stdout.flush()
+        image={}
+        image['name']=name
+        image['location']=image_path #"/home/atierno/cirros-0.3.3-x86_64-disk.img"
+        result,new_image1=myvim.new_tenant_image(image)
+        if result<0:
+            print(" Fail")
+            print("  VIM response:", new_image1)
+            exit(-1)
+        else:
+            print(" Ok", new_image1)
+            rollback_list.append(("image",name, new_image1))
+
+        if not skip_admin_tests:
+            try:
+                print('changing credentials to new tenant'.ljust(50))
+                sys.stdout.flush()
+                myvim['tenant']  =tenant_name
+                myvim['user']=user_name
+                myvim['passwd']=user_name
+                print(" Ok")
+                rollback_list.append(("creds", "tenant", creds["tenant_name"]))
+                rollback_list.append(("creds", "user",   creds["username"]))
+                rollback_list.append(("creds", "passwd", creds["password"]))
+            except Exception as e:
+                print(" Fail")
+                print(" Error setting osconnector to new tenant:", str(type(e))[6:-1] + ": "+  str(e))
+                exit(-1)
+
+        name="tos-net-bridge"
+        print("creating new net '{}'".format(name)).ljust(50),
+        sys.stdout.flush()
+        result,new_net1=myvim.new_tenant_network(name, "bridge")
+        if result<0:
+            print(" Fail")
+            print("  VIM response:", new_net1)
+            exit(-1)
+        else:
+            print(" Ok", new_net1)
+            rollback_list.append(("network",name, new_net1))
+
+        name="tos-vm-cloud"
+        print("creating new VM '{}'".format(name).ljust(50))
+        sys.stdout.flush()
+        result,new_vm1=myvim.new_tenant_vminstance(name, "vm-cloud-description", False,new_image1,new_flavor1,
+                                    [{"net_id":new_net1, "type":"virtio"}] )
+        if result<0:
+            print(" Fail")
+            print("  VIM response:", new_vm1)
+            exit(-1)
+        else:
+            print(" Ok", new_vm1)
+            rollback_list.append(("vm",name, new_vm1))
+
+            
+        print('DONE  Ok')
+        print("Type ENTER to delete items")
+        input('> ')
+        exit()      
+              
+    except KeyboardInterrupt:
+        print(" Canceled!")
+    except SystemExit:
+        pass
+    if len(rollback_list):
+        delete_items()
+
diff --git a/RO/test/test_vimconn.sh b/RO/test/test_vimconn.sh
new file mode 100755 (executable)
index 0000000..0f84af3
--- /dev/null
@@ -0,0 +1,294 @@
+#!/bin/bash
+
+##
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+#This script can be used as a basic test of openmano deployment over a vim
+#in order to use you need to set the VIM_XXXX bash variables with a vim values
+#    VIM_TYPE         openstack or openvim
+#    VIM_USERNAME     e.g.: admin
+#    VIM_PASSWORD     
+#    VIM_AUTH_URL     url to access VIM e.g. http:/openstack:35357/v2.0
+#    VIM_AUTH_URL_ADMIN admin url
+#    VIM_TENANT_NAME  e.g.: admin
+#    VIM_CONFIG       e.g.: "'network_vlan_ranges: sriov_net'"
+#    VIM_TEST_IMAGE_PATH_LINUX  image path(location) to use by the VNF linux
+#    VIM_TEST_IMAGE_PATH_NFV image path(location) to use by the VNF dataplaneVNF_2VMs and dataplaneVNF3
+
+#it should be used with source. It can modify /home/$USER/.bashrc appending the variables
+#you need to delete them manually if desired
+
# Print the command-line help of this script to stdout.
function usage(){
    echo -e "usage: ${BASH_SOURCE[0]} [OPTIONS] <action>\n  test VIM managing from openmano"
    echo -e "  <action> is a list of the following items (by default 'reset create')"
    echo -e "    reset     reset the openmano database content"
    echo -e "    create    creates items at VIM"
    echo -e "    delete    delete created items"
    echo -e "  OPTIONS:"
    echo -e "    -f --force       does not prompt for confirmation"
    echo -e "    -h --help        shows this help"
    echo -e "    --insert-bashrc  insert the created tenant,datacenter variables at"
    echo -e "                     ~/.bashrc to be available by openmano config"
}
+
# Return 0 (success) if $1 looks like a UUID: either the canonical
# hyphenated 8-4-4-4-12 form or a plain 32-character hex string.
function is_valid_uuid(){
    echo "$1" | grep -q -E '^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$' && return 0
    echo "$1" | grep -q -E '^[0-9a-f]{32}$' && return 0
    return 1
}
+
#detect if is called with a source to use the 'exit'/'return' command for exiting
[[ ${BASH_SOURCE[0]} != $0 ]] && _exit="return" || _exit="exit"

#detect if environment variables are set
fail=""
[[ -z $VIM_TYPE ]]     && echo "VIM_TYPE variable not defined" >&2 && fail=1
[[ -z $VIM_USERNAME ]] && echo "VIM_USERNAME variable not defined" >&2 && fail=1
[[ -z $VIM_PASSWORD ]] && echo "VIM_PASSWORD variable not defined" >&2 && fail=1
[[ -z $VIM_AUTH_URL ]] && echo "VIM_AUTH_URL variable not defined" >&2 && fail=1
# BUGFIX: the second test checked VIM_TENANT_NAME twice; it must check VIM_TENANT_ID
# (either one is enough, as the error message says)
[[ -z $VIM_TENANT_NAME ]] && [[ -z $VIM_TENANT_ID ]] && echo "neither VIM_TENANT_NAME nor VIM_TENANT_ID variables are defined" >&2 && fail=1
[[ -z $VIM_CONFIG ]] && echo "VIM_CONFIG variable not defined" >&2 && fail=1
[[ -z $VIM_TEST_IMAGE_PATH_LINUX ]] && echo "VIM_TEST_IMAGE_PATH_LINUX variable not defined" >&2 && fail=1
[[ -z $VIM_TEST_IMAGE_PATH_NFV ]]   && echo "VIM_TEST_IMAGE_PATH_NFV variable not defined" >&2 && fail=1
[[ -n $fail ]] && $_exit 1
+
#check correct arguments
action_list=""
for param in $*
do
    case $param in
        reset|create|delete)
            action_list="$action_list $param"
            ;;
        -h|--help)
            usage
            $_exit 0
            ;;
        -f|--force)
            force=y
            ;;
        --insert-bashrc)
            insert_bashrc=y
            ;;
        *)
            echo "invalid argument '$param'?" &&  usage >&2 && $_exit 1
            ;;
    esac
done

DIRNAME=$(dirname $(readlink -f ${BASH_SOURCE[0]}))
DIRmano=$(dirname $DIRNAME)
DIRscript=${DIRmano}/scripts
#by default action should be reset and create
[[ -z $action_list ]] && action_list="reset create"
+
# run the requested actions in the order given (default: "reset create")
for action in $action_list
do
if [[ $action == "reset" ]]
then

    #ask for confirmation if argument is not -f --force
    [[ $force != y ]] && read -e -p "WARNING: reset openmano database, content will be lost!!! Continue(y/N)" force
    # note: "$_exit" with no status code returns the status of the last command
    [[ $force != y ]] && [[ $force != yes ]] && echo "aborted!" && $_exit

    echo "Stopping openmano"
    $DIRscript/service-openmano mano stop
    echo "Initializing openmano database"
    $DIRmano/database_utils/init_mano_db.sh -u mano -p manopw --createdb
    echo "Starting openmano"
    $DIRscript/service-openmano mano start
+
elif [[ $action == "delete" ]]
then
    result=`openmano tenant-list TESTVIM-tenant`
    nfvotenant=`echo $result |gawk '{print $1}'`
    #check a valid uuid is obtained
    # if not, the "! echo" keeps the chain going so $_exit runs after the message
    is_valid_uuid $nfvotenant || ! echo "Tenant TESTVIM-tenant not found. Already delete?" >&2 || $_exit 1
    export OPENMANO_TENANT=$nfvotenant
    # best-effort cleanup: each failed delete just reports "fail" and continues
    openmano instance-scenario-delete -f simple-instance     || echo "fail"
    openmano instance-scenario-delete -f complex2-instance   || echo "fail"
    openmano instance-scenario-delete -f complex4-instance   || echo "fail"
    openmano scenario-delete -f simple       || echo "fail"
    openmano scenario-delete -f complex2     || echo "fail"
    openmano scenario-delete -f complex3     || echo "fail"
    openmano scenario-delete -f complex4     || echo "fail"
    openmano vnf-delete -f linux             || echo "fail"
    openmano vnf-delete -f linux_2VMs_v02    || echo "fail"
    openmano vnf-delete -f dataplaneVNF_2VMs || echo "fail"
    openmano vnf-delete -f dataplaneVNF3     || echo "fail"
    openmano vnf-delete -f TESTVIM-VNF1          || echo "fail"
    openmano datacenter-detach TESTVIM-dc        || echo "fail"
    openmano datacenter-delete -f TESTVIM-dc     || echo "fail"
    openmano tenant-delete -f TESTVIM-tenant     || echo "fail"
+
+elif [[ $action == "create" ]]
+then 
+
+    printf "%-50s" "Creating openmano tenant 'TESTVIM-tenant': "
+    result=`openmano tenant-create TESTVIM-tenant --description="created by test_vimconn.sh"`
+    nfvotenant=`echo $result |gawk '{print $1}'`
+    #check a valid uuid is obtained
+    ! is_valid_uuid $nfvotenant && echo "FAIL" && echo "    $result" && $_exit 1 
+    export OPENMANO_TENANT=$nfvotenant
+    [[ $insert_bashrc == y ]] && echo -e "\nexport OPENMANO_TENANT=$nfvotenant"  >> ~/.bashrc
+    echo $nfvotenant
+
+    printf "%-50s" "Creating datacenter 'TESTVIM-dc' in openmano:"
+    URL_ADMIN_PARAM=""
+    [[ -n $VIM_AUTH_URL_ADMIN ]] && URL_ADMIN_PARAM="--url_admin=$VIM_AUTH_URL_ADMIN"
+    result=`openmano datacenter-create TESTVIM-dc "${VIM_AUTH_URL}" "--type=$VIM_TYPE" $URL_ADMIN_PARAM "--config=${VIM_CONFIG}"`
+    datacenter=`echo $result |gawk '{print $1}'`
+    #check a valid uuid is obtained
+    ! is_valid_uuid $datacenter && echo "FAIL" && echo "    $result" && $_exit 1 
+    echo $datacenter
+    export OPENMANO_DATACENTER=$datacenter
+    [[ $insert_bashrc == y ]] && echo -e "\nexport OPENMANO_DATACENTER=$datacenter"  >> ~/.bashrc
+
+    printf "%-50s" "Attaching openmano tenant to the datacenter:"
+    [[ -n $VIM_PASSWORD ]]    && passwd_param="--password=$VIM_PASSWORD"                    || passwd_param=""
+    [[ -n $VIM_TENANT_NAME ]] && vim_tenant_name_param="--vim-tenant-name=$VIM_TENANT_NAME" || vim_tenant_name_param=""
+    [[ -n $VIM_TENANT_ID   ]] && vim_tenant_id_param="--vim-tenant-id=$VIM_TENANT_ID"       || vim_tenant_id_param=""
+    [[ -n $VIM_PASSWORD ]] && passwd_param="--password=$VIM_PASSWORD" || passwd_param=""
+    result=`openmano datacenter-attach TESTVIM-dc "--user=$VIM_USERNAME" "$passwd_param" "$vim_tenant_name_param"`
+    [[ $? != 0 ]] && echo  "FAIL" && echo "    $result" && $_exit 1
+    echo OK
+
+    printf "%-50s" "Updating external nets in openmano: "
+    result=`openmano datacenter-netmap-delete -f --all`
+    [[ $? != 0 ]] && echo  "FAIL" && echo "    $result"  && $_exit 1
+    result=`openmano datacenter-netmap-import -f`
+    [[ $? != 0 ]] && echo  "FAIL" && echo "    $result"  && $_exit 1
+    echo OK
+
+    printf "%-50s" "Creating VNF 'linux': "
+    #glance image-create --file=./US1404dpdk.qcow2 --name=US1404dpdk --disk-format=qcow2 --min-disk=2 --is-public=True --container-format=bare
+    #nova image-meta US1404dpdk set location=/mnt/powervault/virtualization/vnfs/os/US1404dpdk.qcow2
+    #glance image-create --file=./US1404user.qcow2 --min-disk=2 --is-public=True --container-format=bare --name=US1404user --disk-format=qcow2
+    #nova image-meta US1404user  set location=/mnt/powervault/virtualization/vnfs/os/US1404user.qcow2
+    result=`openmano vnf-create $DIRmano/vnfs/examples/linux.yaml "--image-path=$VIM_TEST_IMAGE_PATH_LINUX"`
+    vnf=`echo $result |gawk '{print $1}'`
+    #check a valid uuid is obtained
+    ! is_valid_uuid $vnf && echo FAIL && echo "    $result" &&  $_exit 1
+    echo $vnf
+    
+    printf "%-50s" "Creating VNF 1PF,1VF,2GB,4PThreads: "
+    result=`openmano vnf-create "vnf:
+        name: TESTVIM-VNF1
+        external-connections:
+        - name: eth0
+          type: mgmt
+          VNFC: TESTVIM-VNF1-VM
+          local_iface_name: eth0
+        - name: PF0
+          type: data
+          VNFC: TESTVIM-VNF1-VM
+          local_iface_name: PF0
+        - name: VF0
+          type: data
+          VNFC: TESTVIM-VNF1-VM
+          local_iface_name: VF0
+        VNFC: 
+        - name: TESTVIM-VNF1-VM
+          VNFC image: $VIM_TEST_IMAGE_PATH_NFV
+          numas:
+          - paired-threads: 2
+            paired-threads-id: [ [0,2], [1,3] ]
+            memory: 2
+            interfaces:
+            - name:  PF0
+              vpci: '0000:00:11.0'
+              dedicated: 'yes'
+              bandwidth: 10 Gbps
+              mac_address: '20:33:45:56:77:44'
+            - name:  VF0
+              vpci:  '0000:00:12.0'
+              dedicated: 'no'
+              bandwidth: 1 Gbps
+              mac_address: '20:33:45:56:77:45'
+          bridge-ifaces:
+          - name: eth0
+            vpci: '0000:00:09.0'
+            bandwidth: 1 Mbps
+            mac_address: '20:33:45:56:77:46'
+            model: e1000
+       "`
+    vnf=`echo $result |gawk '{print $1}'`
+    ! is_valid_uuid $vnf && echo FAIL && echo "    $result" && $_exit 1
+    echo $vnf
+    printf "%-50s" "Creating VNF 'dataplaneVNF_2VMs': "
+    result=`openmano vnf-create $DIRmano/vnfs/examples/dataplaneVNF_2VMs.yaml "--image-path=$VIM_TEST_IMAGE_PATH_NFV,$VIM_TEST_IMAGE_PATH_NFV"`
+    vnf=`echo $result |gawk '{print $1}'`
+    ! is_valid_uuid $vnf && echo FAIL && echo "    $result" && $_exit 1
+    echo $vnf
+    printf "%-50s" "Creating VNF 'dataplaneVNF3.yaml': "
+    result=`openmano vnf-create $DIRmano/vnfs/examples/dataplaneVNF3.yaml "--image-path=$VIM_TEST_IMAGE_PATH_NFV"`
+    vnf=`echo $result |gawk '{print $1}'`
+    ! is_valid_uuid $vnf && echo FAIL && echo "    $result" && $_exit 1
+    echo $vnf
+
+    printf "%-50s" "Creating VNF 'dataplaneVNF_2VMs_v02': "
+    result=`openmano vnf-create $DIRmano/vnfs/examples/dataplaneVNF_2VMs_v02.yaml "--image-path=$VIM_TEST_IMAGE_PATH_NFV,$VIM_TEST_IMAGE_PATH_NFV"`
+    vnf=`echo $result |gawk '{print $1}'`
+    ! is_valid_uuid $vnf && echo FAIL && echo "    $result" && $_exit 1
+    echo $vnf
+
+    printf "%-50s" "Creating VNF 'linux_2VMs_v02': "
+    result=`openmano vnf-create $DIRmano/vnfs/examples/linux_2VMs_v02.yaml "--image-path=$VIM_TEST_IMAGE_PATH_NFV,$VIM_TEST_IMAGE_PATH_NFV"`
+    vnf=`echo $result |gawk '{print $1}'`
+    ! is_valid_uuid $vnf && echo FAIL && echo "    $result" && $_exit 1
+    echo $vnf
+
+    for sce in simple complex2 complex3 complex4
+    do
+      printf "%-50s" "Creating scenario '$sce':"
+      result=`openmano scenario-create $DIRmano/scenarios/examples/${sce}.yaml`
+      scenario=`echo $result |gawk '{print $1}'`
+      ! is_valid_uuid $scenario && echo FAIL && echo "    $result" &&  $_exit 1
+      echo $scenario
+    done
+    
+    #USER_KEY=""
+    key_param1=""
+    key_param2=""
+    #add user keys if present at .ssh    
+    ls ${HOME}/.ssh/*.pub > /dev/null 2>&1 && key_param1=--keypair-auto
+
+    for sce in simple complex2
+    do 
+      printf "%-50s" "Deploying scenario '$sce':"
+      result=`openmano instance-scenario-create --scenario $sce --name ${sce}-instance "$key_param1" "$key_param2"`
+      instance=`echo $result |gawk '{print $1}'`
+      ! is_valid_uuid $instance && echo FAIL && echo "    $result" && $_exit 1
+      echo $instance
+    done
+
+    #Testing IP parameters in networks
+    printf "%-50s" "Deploying scenario 'complex4' with IP parameters in networks:"
+    result=`openmano instance-scenario-create $DIRmano/instance-scenarios/examples/instance-creation-complex4.yaml "$key_param1" "$key_param2"`
+    instance=`echo $result |gawk '{print $1}'`
+    ! is_valid_uuid $instance && echo FAIL && echo "    $result" && $_exit 1
+    echo $instance
+
+    echo
+    echo DONE
+fi
+done
+
diff --git a/RO/tox.ini b/RO/tox.ini
new file mode 100644 (file)
index 0000000..810ab39
--- /dev/null
@@ -0,0 +1,35 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+[tox]
+#envlist = py27,py3
+envlist = py35
+toxworkdir={homedir}/.tox
+
+[testenv]
+deps=nose
+     mock
+commands=nosetests
+
+[testenv:flake8]
+basepython = python
+deps = flake8
+# TODO: for the moment only a few files are tested.
+commands = flake8 osm_ro/wim  --max-line-length 120 \
+    --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp,osm_im --ignore W291,W293,E226,E402,W504
+
+[testenv:build]
+basepython = python3
+deps = stdeb
+       setuptools-version-command
+commands = python3 setup.py --command-packages=stdeb.command bdist_deb
diff --git a/database_utils/dump_db.sh b/database_utils/dump_db.sh
deleted file mode 100755 (executable)
index 89c83f0..0000000
+++ /dev/null
@@ -1,147 +0,0 @@
-#!/bin/bash
-
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-
-LICENSE_HEAD='/**
-* Copyright 2017 Telefonica Investigacion y Desarrollo, S.A.U.
-* This file is part of openmano
-* All Rights Reserved.
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-*
-* For those usages not covered by the Apache License, Version 2.0 please
-* contact with: nfvlabs@tid.es
-**/
-'
-
-DBUSER="mano"
-DBPASS=""
-DBHOST="localhost"
-DBPORT="3306"
-DBNAME="mano_db"
-# Detect paths
-MYSQL=$(which mysql)
-AWK=$(which awk)
-GREP=$(which grep)
-DIRNAME=`dirname $(readlink -f $0)`
-function usage(){
-    echo -e "Usage: $0 OPTIONS"
-    echo -e "  Dumps openmano database content"
-    echo -e "  OPTIONS"
-    echo -e "     -u USER  database user. '$DBUSER' by default. Prompts if DB access fails"
-    echo -e "     -p PASS  database password. 'No password' by default. Prompts if DB access fails"
-    echo -e "     -P PORT  database port. '$DBPORT' by default"
-    echo -e "     -h HOST  database host. '$DBHOST' by default"
-    echo -e "     -d NAME  database name. '$DBNAME' by default.  Prompts if DB access fails"
-    echo -e "     --help   shows this help"
-}
-
-while getopts ":u:p:P:h:-:" o; do
-    case "${o}" in
-        u)
-            DBUSER="$OPTARG"
-            ;;
-        p)
-            DBPASS="$OPTARG"
-            ;;
-        P)
-            DBPORT="$OPTARG"
-            ;;
-        d)
-            DBNAME="$OPTARG"
-            ;;
-        h)
-            DBHOST="$OPTARG"
-            ;;
-        -)
-            [ "${OPTARG}" == "help" ] && usage && exit 0
-            echo "Invalid option: --$OPTARG" >&2 && usage  >&2
-            exit 1
-            ;;
-        \?)
-            echo "Invalid option: -$OPTARG" >&2 && usage  >&2
-            exit 1
-            ;;
-        :)
-            echo "Option -$OPTARG requires an argument." >&2 && usage  >&2
-            exit 1
-            ;;
-        *)
-            usage >&2
-            exit -1
-            ;;
-    esac
-done
-shift $((OPTIND-1))
-
-#check and ask for database user password
-DBUSER_="-u$DBUSER"
-DBPASS_=""
-[ -n "$DBPASS" ] && DBPASS_="-p$DBPASS"
-DBHOST_="-h$DBHOST"
-DBPORT_="-P$DBPORT"
-while !  echo ";" | mysql $DBHOST_ $DBPORT_ $DBUSER_ $DBPASS_ $DBNAME >/dev/null 2>&1
-do
-        [ -n "$logintry" ] &&  echo -e "\nInvalid database credentials!!!. Try again (Ctrl+c to abort)"
-        [ -z "$logintry" ] &&  echo -e "\nProvide database name and credentials"
-        read -e -p "mysql database name($DBNAME): " KK
-        [ -n "$KK" ] && DBNAME="$KK"
-        read -e -p "mysql user($DBUSER): " KK
-        [ -n "$KK" ] && DBUSER="$KK" && DBUSER_="-u$DBUSER"
-        read -e -s -p "mysql password: " DBPASS
-        [ -n "$DBPASS" ] && DBPASS_="-p$DBPASS"
-        [ -z "$DBPASS" ] && DBPASS_=""
-        logintry="yes"
-        echo
-done
-
-#echo structure, including the content of schema_version
-echo "$LICENSE_HEAD" > ${DIRNAME}/${DBNAME}_structure.sql
-mysqldump $DBHOST_ $DBPORT_ $DBUSER_ $DBPASS_ --no-data --add-drop-table --add-drop-database --routines --databases $DBNAME >> ${DIRNAME}/${DBNAME}_structure.sql
-echo -e "\n\n\n\n" >> ${DIRNAME}/${DBNAME}_structure.sql
-mysqldump $DBHOST_ $DBPORT_ $DBUSER_ $DBPASS_ --no-create-info $DBNAME --tables schema_version 2>/dev/null  >> ${DIRNAME}/${DBNAME}_structure.sql
-echo "    ${DIRNAME}/${DBNAME}_structure.sql"
-
-#echo only data
-echo "$LICENSE_HEAD" > ${DIRNAME}/${DBNAME}_data.sql #copy my own header
-mysqldump $DBHOST_ $DBPORT_ $DBUSER_ $DBPASS_ --no-create-info $DBNAME >> ${DIRNAME}/${DBNAME}_data.sql
-echo "    ${DIRNAME}/${DBNAME}_data.sql"
-
-#echo all
-echo "$LICENSE_HEAD" > ${DIRNAME}/${DBNAME}_all.sql #copy my own header
-mysqldump $DBHOST_ $DBPORT_ $DBUSER_ $DBPASS_ --add-drop-table --add-drop-database --routines --databases $DBNAME >> ${DIRNAME}/${DBNAME}_all.sql
-echo "    ${DIRNAME}/${DBNAME}_all.sql"
-
diff --git a/database_utils/init_mano_db.sh b/database_utils/init_mano_db.sh
deleted file mode 100755 (executable)
index 147ea38..0000000
+++ /dev/null
@@ -1,170 +0,0 @@
-#!/bin/bash
-
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-DBUSER="mano"
-DBPASS=""
-DEFAULT_DBPASS="manopw"
-DBHOST=""
-DBPORT="3306"
-DBNAME="mano_db"
-QUIET_MODE=""
-CREATEDB=""
-
-# Detect paths
-MYSQL=$(which mysql)
-AWK=$(which awk)
-GREP=$(which grep)
-DIRNAME=`dirname $(readlink -f $0)`
-
-function usage(){
-    echo -e "Usage: $0 OPTIONS [version]"
-    echo -e "  Inits openmano database; deletes previous one and loads from ${DBNAME}_structure.sql"\
-    echo -e "   and data from host_ranking.sql, nets.sql, of_ports_pci_correspondece*.sql"
-            "If [version]  is not provided, it is upgraded to the last version"
-    echo -e "  OPTIONS"
-    echo -e "     -u USER  database user. '$DBUSER' by default. Prompts if DB access fails"
-    echo -e "     -p PASS  database password. If missing it tries without and '$DEFAULT_DBPASS' password before prompting"
-    echo -e "     -P PORT  database port. '$DBPORT' by default"
-    echo -e "     -h HOST  database host. 'localhost' by default"
-    echo -e "     -d NAME  database name. '$DBNAME' by default.  Prompts if DB access fails"
-    echo -e "     -q --quiet: Do not prompt for credentials and exit if cannot access to database"
-    echo -e "     --createdb   forces the deletion and creation of the database"
-    echo -e "     --help   shows this help"
-}
-
-while getopts ":u:p:P:h:d:q-:" o; do
-    case "${o}" in
-        u)
-            DBUSER="$OPTARG"
-            ;;
-        p)
-            DBPASS="$OPTARG"
-            ;;
-        P)
-            DBPORT="$OPTARG"
-            ;;
-        d)
-            DBNAME="$OPTARG"
-            ;;
-        h)
-            DBHOST="$OPTARG"
-            ;;
-        q)
-            export QUIET_MODE="-q"
-            ;;
-        -)
-            [ "${OPTARG}" == "help" ] && usage && exit 0
-            [ "${OPTARG}" == "quiet" ] && export QUIET_MODE="-q" && continue
-            [ "${OPTARG}" == "createdb" ] && export CREATEDB=yes && continue
-            echo "Invalid option: '--$OPTARG'. Type --help for more information" >&2
-            exit 1
-            ;;
-        \?)
-            echo "Invalid option: '-$OPTARG'. Type --help for more information" >&2
-            exit 1
-            ;;
-        :)
-            echo "Option '-$OPTARG' requires an argument. Type --help for more information" >&2
-            exit 1
-            ;;
-        *)
-            usage >&2
-            exit 1
-            ;;
-    esac
-done
-shift $((OPTIND-1))
-
-DB_VERSION=$1
-
-if [ -n "$DB_VERSION" ] ; then
-    # check it is a number and an allowed one
-    [ "$DB_VERSION" -eq "$DB_VERSION" ] 2>/dev/null || 
-        ! echo "parameter 'version' requires a integer value" >&2 || exit 1
-fi
-
-# Creating temporary file
-TEMPFILE="$(mktemp -q --tmpdir "initdb.XXXXXX")"
-trap 'rm -f "$TEMPFILE"' EXIT
-chmod 0600 "$TEMPFILE"
-DEF_EXTRA_FILE_PARAM="--defaults-extra-file=$TEMPFILE"
-echo -e "[client]\n user='${DBUSER}'\n password='$DBPASS'\n host='$DBHOST'\n port='$DBPORT'" > "$TEMPFILE"
-
-if [ -n "${CREATEDB}" ] ; then
-    FIRST_TRY="yes"
-    while ! DB_ERROR=`mysqladmin "$DEF_EXTRA_FILE_PARAM" -s status 2>&1 >/dev/null` ; do
-        # if password is not provided, try silently with $DEFAULT_DBPASS before exit or prompt for credentials
-        [[ -n "$FIRST_TRY" ]] && [[ -z "$DBPASS" ]] && DBPASS="$DEFAULT_DBPASS" &&
-            echo -e "[client]\n user='${DBUSER}'\n password='$DBPASS'\n host='$DBHOST'\n port='$DBPORT'" > "$TEMPFILE" &&
-            continue
-        echo "$DB_ERROR"
-        [[ -n "$QUIET_MODE" ]] && echo -e "Invalid admin database credentials!!!" >&2 && exit 1
-        echo -e "Provide database credentials (Ctrl+c to abort):"
-        read -e -p "    mysql user($DBUSER): " KK
-        [ -n "$KK" ] && DBUSER="$KK"
-        read -e -s -p "    mysql password: " DBPASS
-        echo -e "[client]\n user='${DBUSER}'\n password='$DBPASS'\n host='$DBHOST'\n port='$DBPORT'" > "$TEMPFILE"
-        FIRST_TRY=""
-        echo
-    done
-    # echo "    deleting previous database ${DBNAME} if it exists"
-    mysqladmin $DEF_EXTRA_FILE_PARAM DROP "${DBNAME}" -f && echo "Previous database deleted"
-    echo "    creating database ${DBNAME}"
-    mysqladmin $DEF_EXTRA_FILE_PARAM create "${DBNAME}" || exit 1
-fi
-
-# Check and ask for database user password
-FIRST_TRY="yes"
-while ! DB_ERROR=`mysql "$DEF_EXTRA_FILE_PARAM" $DBNAME -e "quit" 2>&1 >/dev/null`
-do
-    # if password is not provided, try silently with $DEFAULT_DBPASS before exit or prompt for credentials
-    [[ -n "$FIRST_TRY" ]] && [[ -z "$DBPASS" ]] && DBPASS="$DEFAULT_DBPASS" &&
-        echo -e "[client]\n user='${DBUSER}'\n password='$DBPASS'\n host='$DBHOST'\n port='$DBPORT'" > "$TEMPFILE" &&
-        continue
-    echo "$DB_ERROR"
-    [[ -n "$QUIET_MODE" ]] && echo -e "Invalid database credentials!!!" >&2 && exit 1
-    echo -e "Provide database name and credentials (Ctrl+c to abort):"
-    read -e -p "    mysql database name($DBNAME): " KK
-    [ -n "$KK" ] && DBNAME="$KK"
-    read -e -p "    mysql user($DBUSER): " KK
-    [ -n "$KK" ] && DBUSER="$KK"
-    read -e -s -p "    mysql password: " DBPASS
-    echo -e "[client]\n user='${DBUSER}'\n password='$DBPASS'\n host='$DBHOST'\n port='$DBPORT'" > "$TEMPFILE"
-    FIRST_TRY=""
-    echo
-done
-
-DBCMD="mysql $DEF_EXTRA_FILE_PARAM $DBNAME"
-DBUSER_="" && [ -n "$DBUSER" ] && DBUSER_="-u$DBUSER"
-DBPASS_="" && [ -n "$DBPASS" ] && DBPASS_="-p$DBPASS"
-DBHOST_="" && [ -n "$DBHOST" ] && DBHOST_="-h$DBHOST"
-DBPORT_="-P$DBPORT"
-
-echo "    loading ${DIRNAME}/mano_db_structure.sql"
-sed -e "s/{{mano_db}}/$DBNAME/" ${DIRNAME}/mano_db_structure.sql | mysql $DEF_EXTRA_FILE_PARAM ||
-    ! echo "ERROR at init $DBNAME" || exit 1
-
-echo "    migrage database version"
-# echo "${DIRNAME}/migrate_mano_db.sh $DBHOST_ $DBPORT_ $DBUSER_ $DBPASS_ -d$DBNAME $QUIET_MODE $DB_VERSION"
-${DIRNAME}/migrate_mano_db.sh $DBHOST_ $DBPORT_ $DBUSER_ $DBPASS_ -d$DBNAME $QUIET_MODE $DB_VERSION
-
diff --git a/database_utils/install-db-server.sh b/database_utils/install-db-server.sh
deleted file mode 100755 (executable)
index 36b8003..0000000
+++ /dev/null
@@ -1,296 +0,0 @@
-#!/usr/bin/env bash
-
-DB_NAME='mano_db'
-DB_ADMIN_USER="root"
-DB_USER="mano"
-DB_PASS="manopw"
-DB_ADMIN_PASSWD=""
-DB_PORT="3306"
-DB_HOST=""
-DB_HOST_PARAM=""
-QUIET_MODE=""
-FORCEDB=""
-UPDATEDB=""
-NO_PACKAGES=""
-UNINSTALL=""
-
-
-function usage(){
-    echo -e "usage: sudo $0 [OPTIONS]"
-    echo -e "Install openmano database server and the needed packages"
-    echo -e "  OPTIONS"
-    echo -e "     -U USER:    database admin user. '$DB_ADMIN_USER' by default. Prompts if needed"
-    echo -e "     -P PASS:    database admin password to be used or installed. Prompts if needed"
-    echo -e "     -d: database name, '$DB_NAME' by default"
-    echo -e "     -u: database user, '$DB_USER' by default"
-    echo -e "     -p: database pass, '$DB_PASS' by default"
-    echo -e "     -H: HOST  database host. 'localhost' by default"
-    echo -e "     -T: PORT  database port. '$DB_PORT' by default"
-    echo -e "     -q --quiet: install in unattended mode"
-    echo -e "     -h --help:  show this help"
-    echo -e "     --forcedb:  if database exists, it is dropped and a new one is created"
-    echo -e "     --updatedb: if database exists, it preserves the content and it is updated to the needed version"
-    echo -e "     --no-install-packages: use this option to skip updating and installing the requires packages. This avoid wasting time if you are sure requires packages are present e.g. because of a previous installation"
-    echo -e "     --unistall: delete database"
-}
-
-function ask_user(){
-    # ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
-    # Params: $1 text to ask;   $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
-    # Return: true(0) if user type 'yes'; false (1) if user type 'no'
-    read -e -p "$1" USER_CONFIRMATION
-    while true ; do
-        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
-        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
-        [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
-        [ "${USER_CONFIRMATION,,}" == "no" ]  || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
-        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
-    done
-}
-
-function install_packages(){
-    [ -x /usr/bin/apt-get ] && apt-get install -y $*
-    [ -x /usr/bin/yum ]     && yum install     -y $*   
-    
-    #check properly installed
-    for PACKAGE in $*
-    do
-        PACKAGE_INSTALLED="no"
-        [ -x /usr/bin/apt-get ] && dpkg -l $PACKAGE            &>> /dev/null && PACKAGE_INSTALLED="yes"
-        [ -x /usr/bin/yum ]     && yum list installed $PACKAGE &>> /dev/null && PACKAGE_INSTALLED="yes" 
-        if [ "$PACKAGE_INSTALLED" = "no" ]
-        then
-            echo "failed to install package '$PACKAGE'. Revise network connectivity and try again" >&2
-            exit 1
-       fi
-    done
-}
-
-function _install_mysql_package(){
-    echo '
-    #################################################################
-    #####               INSTALL REQUIRED PACKAGES               #####
-    #################################################################'
-    [ "$_DISTRO" == "Ubuntu" ] && ! install_packages "mysql-server" && exit 1
-    [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && ! install_packages "mariadb mariadb-server" && exit 1
-
-    if [[ "$_DISTRO" == "Ubuntu" ]]
-    then
-        #start services. By default CentOS does not start services
-        service mysql start >> /dev/null
-        # try to set admin password, ignore if fails
-        [[ -n $DBPASSWD ]] && mysqladmin -u $DB_ADMIN_USER -s password $DB_ADMIN_PASSWD
-    fi
-
-    if [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ]
-    then
-        #start services. By default CentOS does not start services
-        service mariadb start
-        service httpd   start
-        systemctl enable mariadb
-        systemctl enable httpd
-        ask_user "Do you want to configure mariadb (recommended if not done before) (Y/n)? " y &&
-            mysql_secure_installation
-
-        ask_user "Do you want to set firewall to grant web access port 80,443  (Y/n)? " y &&
-            firewall-cmd --permanent --zone=public --add-service=http &&
-            firewall-cmd --permanent --zone=public --add-service=https &&
-            firewall-cmd --reload
-    fi
-}
-
-function _create_db(){
-    echo '
-    #################################################################
-    #####        CREATE AND INIT DATABASE                       #####
-    #################################################################'
-    echo "mysqladmin --defaults-extra-file="$TEMPFILE" -s create ${DB_NAME}"
-    mysqladmin --defaults-extra-file="$TEMPFILE" -s create ${DB_NAME} \
-        || ! echo "Error creating ${DB_NAME} database" >&2 \
-        || exit 1
-    echo "CREATE USER $DB_USER@'localhost' IDENTIFIED BY '$DB_PASS';"   | mysql --defaults-extra-file="$TEMPFILE" -s 2>/dev/null \
-        || echo "Warning: User '$DB_USER' cannot be created at database. Probably exist" >&2
-    echo "GRANT ALL PRIVILEGES ON ${DB_NAME}.* TO '$DB_USER'@'localhost';" | mysql --defaults-extra-file="$TEMPFILE" -s \
-        || ! echo "Error: Granting privileges to user '$DB_USER' at database" >&2 \
-        || exit 1
-    echo " Database '${DB_NAME}' created, user '$DB_USER' password '$DB_PASS'"
-    DIRNAME=$(dirname $(readlink -f ${BASH_SOURCE[0]}))
-    ${DIRNAME}/init_mano_db.sh -u"$DB_USER" -p"$DB_PASS" -d"$DB_NAME" -P"$DB_PORT" $DB_HOST_PARAM \
-        || ! echo "Error initializing database '$DB_NAME'" >&2 \
-        || exit 1
-}
-
-function _delete_db(){
-   mysqladmin --defaults-extra-file="$TEMPFILE" -s drop "${DB_NAME}" $DBDELETEPARAM \
-       || ! echo "Error: Could not delete '${DB_NAME}' database" >&2 \
-       || exit 1
-}
-
-function _update_db(){
-    echo '
-    #################################################################
-    #####        UPDATE DATABASE                                #####
-    #################################################################'
-    echo "CREATE USER $DB_USER@'localhost' IDENTIFIED BY '$DB_PASS';" | mysql --defaults-extra-file="$TEMPFILE" -s 2>/dev/null \
-        || echo "Warning: User '$DB_USER' cannot be created at database. Probably exist" >&2
-    echo "GRANT ALL PRIVILEGES ON ${DB_NAME}.* TO '$DB_USER'@'localhost';" | mysql --defaults-extra-file="$TEMPFILE" -s \
-        || ! echo "Error: Granting privileges to user '$DB_USER' at database" >&2 \
-        || exit 1
-    echo " Granted privileges to user '$DB_USER' password '$DB_PASS' to existing database '${DB_NAME}'"
-    DIRNAME=$(dirname $(readlink -f ${BASH_SOURCE[0]}))
-    ${DIRNAME}/migrate_mano_db.sh -u"$DB_USER" -p"$DB_PASS" -d"$DB_NAME" -P"$DB_PORT" $DB_HOST_PARAM \
-        || ! echo "Error updating database '$DB_NAME'" >&2 \
-        || exit 1
-}
-
-function _uninstall_db(){
-echo '
-    #################################################################
-    #####        DELETE DATABASE                                #####
-    #################################################################'
-    DBDELETEPARAM=""
-    [[ -n $QUIET_MODE ]] && DBDELETEPARAM="-f"
-    _delete_db
-}
-
-function db_exists(){  # (db_name, credential_file)
-    # check credentials
-    mysqlshow --defaults-extra-file="$2" >/dev/null  || exit 1
-    if mysqlshow --defaults-extra-file="$2" | grep -v Wildcard | grep -w -q $1
-    then
-        # echo " DB $1 exists"
-        return 0
-    fi
-    # echo " DB $1 does not exist"
-    return 1
-}
-
-while getopts ":U:P:d:u:p:H:T:hiq-:" o; do
-    case "${o}" in
-        U)
-            export DB_ADMIN_USER="$OPTARG"
-            ;;
-        P)
-            export DB_ADMIN_PASSWD="$OPTARG"
-            ;;
-        d)
-            export DB_NAME="$OPTARG"
-            ;;
-        u)
-            export DB_USER="$OPTARG"
-            ;;
-        p)
-            export DB_PASS="$OPTARG"
-            ;;
-        H)
-            export DB_HOST="$OPTARG"
-            export DB_HOST_PARAM="-h$DB_HOST"
-            ;;
-        T)
-            export DB_PORT="$OPTARG"
-            ;;
-        q)
-            export QUIET_MODE=yes
-            export DEBIAN_FRONTEND=noninteractive
-            ;;
-        h)
-            usage && exit 0
-            ;;
-        -)
-            [ "${OPTARG}" == "help" ] && usage && exit 0
-            [ "${OPTARG}" == "forcedb" ] && FORCEDB="y" && continue
-            [ "${OPTARG}" == "updatedb" ] && UPDATEDB="y" && continue
-            [ "${OPTARG}" == "quiet" ] && export QUIET_MODE=yes && export DEBIAN_FRONTEND=noninteractive && continue
-            [ "${OPTARG}" == "no-install-packages" ] && export NO_PACKAGES=yes && continue
-            [ "${OPTARG}" == "uninstall" ] &&  UNINSTALL="y" && continue
-            echo -e "Invalid option: '--$OPTARG'\nTry $0 --help for more information" >&2
-            exit 1
-            ;;
-        \?)
-            echo -e "Invalid option: '-$OPTARG'\nTry $0 --help for more information" >&2
-            exit 1
-            ;;
-        :)
-            echo -e "Option '-$OPTARG' requires an argument\nTry $0 --help for more information" >&2
-            exit 1
-            ;;
-        *)
-            usage >&2
-            exit 1
-            ;;
-    esac
-done
-if [ -n "$FORCEDB" ] && [ -n "$UPDATEDB" ] ; then
-    echo "Error: options --forcedb and --updatedb are mutually exclusive" >&2
-    exit 1
-fi
-
-# Discover Linux distribution
-# try redhat type
-[ -f /etc/redhat-release ] && _DISTRO=$(cat /etc/redhat-release 2>/dev/null | cut  -d" " -f1)
-# if not assuming ubuntu type
-[ -f /etc/redhat-release ] || _DISTRO=$(lsb_release -is  2>/dev/null)
-
-if [[ -z "$NO_PACKAGES" ]]
-then
-    [ "$USER" != "root" ] && echo "Needed root privileges" >&2 && exit 1
-    _install_mysql_package || exit 1
-fi
-
-# Creating temporary file for MYSQL installation and initialization"
-TEMPFILE="$(mktemp -q --tmpdir "installdb.XXXXXX")"
-trap 'rm -f "$TEMPFILE"' EXIT
-chmod 0600 "$TEMPFILE"
-echo -e "[client]\n user='${DB_ADMIN_USER}'\n password='$DB_ADMIN_PASSWD'\n host='$DB_HOST'\n port='$DB_PORT'" > "$TEMPFILE"
-
-#check and ask for database user password. Must be done after database installation
-if [[ -z $QUIET_MODE ]]
-then
-    echo -e "\nCheking database connection and ask for credentials"
-    # echo "mysqladmin --defaults-extra-file=$TEMPFILE -s status >/dev/null"
-    while ! mysqladmin --defaults-extra-file="$TEMPFILE" -s status >/dev/null
-    do
-        [ -n "$logintry" ] &&  echo -e "\nInvalid database credentials!!!. Try again (Ctrl+c to abort)"
-        [ -z "$logintry" ] &&  echo -e "\nProvide database credentials"
-        read -e -p "database admin user? ($DB_ADMIN_USER) " DBUSER_
-        [ -n "$DBUSER_" ] && DB_ADMIN_USER=$DBUSER_
-        read -e -s -p "database admin password? (Enter for not using password) " DBPASSWD_
-        [ -n "$DBPASSWD_" ] && DB_ADMIN_PASSWD="$DBPASSWD_"
-        [ -z "$DBPASSWD_" ] && DB_ADMIN_PASSWD=""
-        echo -e "[client]\n user='${DB_ADMIN_USER}'\n password='$DB_ADMIN_PASSWD'\n host='$DB_HOST'\n port='$DB_PORT'" > "$TEMPFILE"
-        logintry="yes"
-    done
-fi
-
-if [[ ! -z "$UNINSTALL" ]]
-then
-    _uninstall_db
-    exit
-fi
-
-# Create or update database
-if db_exists $DB_NAME $TEMPFILE ; then
-    if [[ -n $FORCEDB ]] ; then
-        # DBDELETEPARAM=""
-        # [[ -n $QUIET_MODE ]] && DBDELETEPARAM="-f"
-        DBDELETEPARAM="-f"
-        _delete_db
-        _create_db
-    elif [[ -n $UPDATEDB ]] ; then
-        _update_db
-    elif [[ -z $QUIET_MODE ]] ; then
-        echo "database '$DB_NAME' exist. Reinstall it?"
-        if ask_user "Type 'y' to drop and reinstall existing database (content will be lost), Type 'n' to update existing database (y/N)? " n ; then
-            _delete_db
-            _create_db
-        else
-            _update_db
-        fi
-    else
-        echo "Database '$DB_NAME' exists. Use option '--forcedb' to force the deletion of the existing one, or '--updatedb' to use existing one and update it"
-        exit 1
-    fi
-else
-    _create_db
-fi
-
diff --git a/database_utils/mano_db_structure.sql b/database_utils/mano_db_structure.sql
deleted file mode 100644 (file)
index 9e2d911..0000000
+++ /dev/null
@@ -1,1240 +0,0 @@
-/**
-* Copyright 2017 Telefonica Investigacion y Desarrollo, S.A.U.
-* This file is part of openmano
-* All Rights Reserved.
-*
-* Licensed under the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License. You may obtain
-* a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-*
-* For those usages not covered by the Apache License, Version 2.0 please
-* contact with: nfvlabs@tid.es
-**/
-
--- MySQL dump 10.13  Distrib 5.7.24, for Linux (x86_64)
---
--- Host: localhost    Database: {{mano_db}}
--- ------------------------------------------------------
--- Server version      5.7.24
-
-/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
-/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
-/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
-/*!40101 SET NAMES utf8 */;
-/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
-/*!40103 SET TIME_ZONE='+00:00' */;
-/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
-/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
-/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
-/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
-
---
--- Current Database: `{{mano_db}}`
---
-
-/*!40000 DROP DATABASE IF EXISTS `{{mano_db}}`*/;
-
-CREATE DATABASE /*!32312 IF NOT EXISTS*/ `{{mano_db}}` /*!40100 DEFAULT CHARACTER SET utf8 */;
-
-USE `{{mano_db}}`;
-
---
--- Table structure for table `datacenter_nets`
---
-
-DROP TABLE IF EXISTS `datacenter_nets`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `datacenter_nets` (
-  `uuid` varchar(36) NOT NULL,
-  `name` varchar(255) NOT NULL,
-  `vim_net_id` varchar(36) NOT NULL,
-  `datacenter_id` varchar(36) NOT NULL,
-  `type` enum('bridge','data','ptp') NOT NULL DEFAULT 'data' COMMENT 'Type of network',
-  `multipoint` enum('true','false') NOT NULL DEFAULT 'true',
-  `shared` enum('true','false') NOT NULL DEFAULT 'false' COMMENT 'If can be shared with serveral scenarios',
-  `description` varchar(255) DEFAULT NULL,
-  `created_at` double NOT NULL,
-  `modified_at` double DEFAULT NULL,
-  PRIMARY KEY (`uuid`),
-  UNIQUE KEY `name_datacenter_id` (`name`,`datacenter_id`),
-  KEY `FK_datacenter_nets_datacenters` (`datacenter_id`),
-  CONSTRAINT `FK_datacenter_nets_datacenters` FOREIGN KEY (`datacenter_id`) REFERENCES `datacenters` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Contain the external nets of a datacenter';
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `datacenter_tenants`
---
-
-DROP TABLE IF EXISTS `datacenter_tenants`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `datacenter_tenants` (
-  `uuid` varchar(36) NOT NULL,
-  `name` varchar(255) DEFAULT NULL,
-  `datacenter_id` varchar(36) NOT NULL COMMENT 'Datacenter of this tenant',
-  `vim_tenant_name` varchar(256) DEFAULT NULL,
-  `vim_tenant_id` varchar(256) DEFAULT NULL COMMENT 'Tenant ID at VIM',
-  `created` enum('true','false') NOT NULL DEFAULT 'false' COMMENT 'Indicates if this tenant has been created by openmano, or it existed on VIM',
-  `user` varchar(64) DEFAULT NULL,
-  `passwd` varchar(64) DEFAULT NULL,
-  `config` varchar(4000) DEFAULT NULL,
-  `created_at` double NOT NULL,
-  `modified_at` double DEFAULT NULL,
-  PRIMARY KEY (`uuid`),
-  KEY `FK_vim_tenants_datacenters` (`datacenter_id`),
-  CONSTRAINT `FK_vim_tenants_datacenters` FOREIGN KEY (`datacenter_id`) REFERENCES `datacenters` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Scenarios defined by the user';
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `datacenters`
---
-
-DROP TABLE IF EXISTS `datacenters`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `datacenters` (
-  `uuid` varchar(36) NOT NULL,
-  `name` varchar(255) NOT NULL,
-  `description` varchar(255) DEFAULT NULL,
-  `type` varchar(36) NOT NULL DEFAULT 'openvim',
-  `vim_url` varchar(150) NOT NULL COMMENT 'URL of the VIM for the REST API',
-  `vim_url_admin` varchar(150) DEFAULT NULL,
-  `config` varchar(4000) DEFAULT NULL COMMENT 'extra config information in json',
-  `created_at` double NOT NULL,
-  `modified_at` double DEFAULT NULL,
-  PRIMARY KEY (`uuid`),
-  UNIQUE KEY `name` (`name`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Datacenters managed by the NFVO.';
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `datacenters_flavors`
---
-
-DROP TABLE IF EXISTS `datacenters_flavors`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `datacenters_flavors` (
-  `id` int(11) NOT NULL AUTO_INCREMENT,
-  `flavor_id` varchar(36) NOT NULL,
-  `datacenter_vim_id` varchar(36) NOT NULL,
-  `vim_id` varchar(36) NOT NULL,
-  `status` enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
-  `vim_info` text,
-  `created` enum('true','false') NOT NULL DEFAULT 'false' COMMENT 'Indicates if it has been created by openmano, or already existed',
-  `extended` varchar(2000) DEFAULT NULL COMMENT 'Extra description json format of additional devices',
-  PRIMARY KEY (`id`),
-  KEY `FK__flavors` (`flavor_id`),
-  KEY `FK_datacenters_flavors_datacenter_tenants` (`datacenter_vim_id`),
-  CONSTRAINT `FK__flavors` FOREIGN KEY (`flavor_id`) REFERENCES `flavors` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
-  CONSTRAINT `FK_datacenters_flavors_datacenter_tenants` FOREIGN KEY (`datacenter_vim_id`) REFERENCES `datacenter_tenants` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
-) ENGINE=InnoDB AUTO_INCREMENT=7 DEFAULT CHARSET=utf8;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `datacenters_images`
---
-
-DROP TABLE IF EXISTS `datacenters_images`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `datacenters_images` (
-  `id` int(11) NOT NULL AUTO_INCREMENT,
-  `image_id` varchar(36) NOT NULL,
-  `datacenter_vim_id` varchar(36) NOT NULL,
-  `vim_id` varchar(36) NOT NULL,
-  `status` enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
-  `vim_info` text,
-  `created` enum('true','false') NOT NULL DEFAULT 'false' COMMENT 'Indicates if it has been created by openmano, or already existed',
-  PRIMARY KEY (`id`),
-  KEY `FK__images` (`image_id`),
-  KEY `FK_datacenters_images_datacenter_tenants` (`datacenter_vim_id`),
-  CONSTRAINT `FK__images` FOREIGN KEY (`image_id`) REFERENCES `images` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
-  CONSTRAINT `FK_datacenters_images_datacenter_tenants` FOREIGN KEY (`datacenter_vim_id`) REFERENCES `datacenter_tenants` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
-) ENGINE=InnoDB AUTO_INCREMENT=6 DEFAULT CHARSET=utf8;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `flavors`
---
-
-DROP TABLE IF EXISTS `flavors`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `flavors` (
-  `uuid` varchar(36) NOT NULL,
-  `name` varchar(255) NOT NULL,
-  `description` varchar(255) DEFAULT NULL,
-  `disk` smallint(5) unsigned DEFAULT NULL,
-  `ram` mediumint(7) unsigned DEFAULT NULL,
-  `vcpus` smallint(5) unsigned DEFAULT NULL,
-  `extended` varchar(2000) DEFAULT NULL COMMENT 'Extra description json format of needed resources and pining, orginized in sets per numa',
-  PRIMARY KEY (`uuid`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `images`
---
-
-DROP TABLE IF EXISTS `images`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `images` (
-  `uuid` varchar(36) NOT NULL,
-  `name` varchar(255) NOT NULL,
-  `universal_name` varchar(255) DEFAULT NULL,
-  `checksum` varchar(32) DEFAULT NULL,
-  `location` varchar(200) DEFAULT NULL,
-  `description` varchar(255) DEFAULT NULL,
-  `metadata` varchar(2000) DEFAULT NULL,
-  PRIMARY KEY (`uuid`),
-  UNIQUE KEY `location` (`location`),
-  UNIQUE KEY `universal_name_checksum` (`universal_name`,`checksum`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `instance_actions`
---
-
-DROP TABLE IF EXISTS `instance_actions`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `instance_actions` (
-  `uuid` varchar(36) NOT NULL,
-  `tenant_id` varchar(36) DEFAULT NULL,
-  `instance_id` varchar(36) DEFAULT NULL,
-  `description` varchar(64) DEFAULT NULL COMMENT 'CREATE, DELETE, SCALE OUT/IN, ...',
-  `number_tasks` smallint(6) NOT NULL DEFAULT '1',
-  `number_done` smallint(6) NOT NULL DEFAULT '0',
-  `number_failed` smallint(6) NOT NULL DEFAULT '0',
-  `created_at` double NOT NULL,
-  `modified_at` double DEFAULT NULL,
-  PRIMARY KEY (`uuid`),
-  KEY `FK_actions_tenants` (`tenant_id`),
-  CONSTRAINT `FK_actions_tenant` FOREIGN KEY (`tenant_id`) REFERENCES `nfvo_tenants` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Contains client actions over instances';
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `instance_classifications`
---
-
-DROP TABLE IF EXISTS `instance_classifications`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `instance_classifications` (
-  `uuid` varchar(36) NOT NULL,
-  `instance_scenario_id` varchar(36) NOT NULL,
-  `vim_classification_id` varchar(36) DEFAULT NULL,
-  `sce_classifier_match_id` varchar(36) DEFAULT NULL,
-  `datacenter_id` varchar(36) DEFAULT NULL,
-  `datacenter_tenant_id` varchar(36) DEFAULT NULL,
-  `status` enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
-  `error_msg` varchar(1024) DEFAULT NULL,
-  `vim_info` text,
-  `created_at` double NOT NULL,
-  `modified_at` double DEFAULT NULL,
-  PRIMARY KEY (`uuid`),
-  KEY `FK_instance_classifications_instance_scenarios` (`instance_scenario_id`),
-  KEY `FK_instance_classifications_sce_classifier_matches` (`sce_classifier_match_id`),
-  KEY `FK_instance_classifications_datacenters` (`datacenter_id`),
-  KEY `FK_instance_classifications_datacenter_tenants` (`datacenter_tenant_id`),
-  CONSTRAINT `FK_instance_classifications_datacenter_tenants` FOREIGN KEY (`datacenter_tenant_id`) REFERENCES `datacenter_tenants` (`uuid`),
-  CONSTRAINT `FK_instance_classifications_datacenters` FOREIGN KEY (`datacenter_id`) REFERENCES `datacenters` (`uuid`),
-  CONSTRAINT `FK_instance_classifications_instance_scenarios` FOREIGN KEY (`instance_scenario_id`) REFERENCES `instance_scenarios` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
-  CONSTRAINT `FK_instance_classifications_sce_classifier_matches` FOREIGN KEY (`sce_classifier_match_id`) REFERENCES `sce_classifier_matches` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE
-) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `instance_interfaces`
---
-
-DROP TABLE IF EXISTS `instance_interfaces`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `instance_interfaces` (
-  `uuid` varchar(36) NOT NULL,
-  `instance_vm_id` varchar(36) NOT NULL,
-  `instance_net_id` varchar(36) NOT NULL,
-  `interface_id` varchar(36) DEFAULT NULL,
-  `vim_interface_id` varchar(128) DEFAULT NULL,
-  `mac_address` varchar(32) DEFAULT NULL,
-  `ip_address` varchar(64) DEFAULT NULL,
-  `vim_info` text,
-  `type` enum('internal','external') NOT NULL COMMENT 'Indicates if this interface is external to a vnf, or internal',
-  `floating_ip` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'Indicates if a floating_ip must be associated to this interface',
-  `port_security` tinyint(1) NOT NULL DEFAULT '1' COMMENT 'Indicates if port security must be enabled or disabled. By default it is enabled',
-  `sdn_port_id` varchar(36) DEFAULT NULL COMMENT 'Port id in ovim',
-  `compute_node` varchar(100) DEFAULT NULL COMMENT 'Compute node id used to specify the SDN port mapping',
-  `pci` varchar(50) DEFAULT NULL COMMENT 'PCI of the  physical port in the host',
-  `vlan` smallint(5) unsigned DEFAULT NULL COMMENT 'VLAN tag used by the port',
-  PRIMARY KEY (`uuid`),
-  KEY `FK_instance_vms` (`instance_vm_id`),
-  KEY `FK_instance_nets` (`instance_net_id`),
-  KEY `FK_instance_ids` (`interface_id`),
-  CONSTRAINT `FK_instance_ids` FOREIGN KEY (`interface_id`) REFERENCES `interfaces` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE,
-  CONSTRAINT `FK_instance_nets` FOREIGN KEY (`instance_net_id`) REFERENCES `instance_nets` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
-  CONSTRAINT `FK_instance_vms` FOREIGN KEY (`instance_vm_id`) REFERENCES `instance_vms` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Table with all running associattion among VM instances and net instances';
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `instance_nets`
---
-
-DROP TABLE IF EXISTS `instance_nets`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `instance_nets` (
-  `uuid` varchar(36) NOT NULL,
-  `vim_net_id` varchar(128) DEFAULT NULL,
-  `vim_name` varchar(255) DEFAULT NULL,
-  `instance_scenario_id` varchar(36) DEFAULT NULL,
-  `sce_net_id` varchar(36) DEFAULT NULL,
-  `net_id` varchar(36) DEFAULT NULL,
-  `datacenter_id` varchar(36) DEFAULT NULL,
-  `datacenter_tenant_id` varchar(36) NOT NULL,
-  `status` enum('ACTIVE','INACTIVE','DOWN','BUILD','ERROR','VIM_ERROR','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
-  `error_msg` varchar(1024) DEFAULT NULL,
-  `vim_info` text,
-  `multipoint` enum('true','false') NOT NULL DEFAULT 'true',
-  `created` enum('true','false') NOT NULL DEFAULT 'false' COMMENT 'Created or already exists at VIM',
-  `created_at` double NOT NULL,
-  `modified_at` double DEFAULT NULL,
-  `sdn_net_id` varchar(36) DEFAULT NULL COMMENT 'Network id in ovim',
-  PRIMARY KEY (`uuid`),
-  KEY `FK_instance_nets_instance_scenarios` (`instance_scenario_id`),
-  KEY `FK_instance_nets_sce_nets` (`sce_net_id`),
-  KEY `FK_instance_nets_nets` (`net_id`),
-  KEY `FK_instance_nets_datacenters` (`datacenter_id`),
-  KEY `FK_instance_nets_datacenter_tenants` (`datacenter_tenant_id`),
-  CONSTRAINT `FK_instance_nets_datacenter_tenants` FOREIGN KEY (`datacenter_tenant_id`) REFERENCES `datacenter_tenants` (`uuid`),
-  CONSTRAINT `FK_instance_nets_datacenters` FOREIGN KEY (`datacenter_id`) REFERENCES `datacenters` (`uuid`),
-  CONSTRAINT `FK_instance_nets_instance_scenarios` FOREIGN KEY (`instance_scenario_id`) REFERENCES `instance_scenarios` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
-  CONSTRAINT `FK_instance_nets_nets` FOREIGN KEY (`net_id`) REFERENCES `nets` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE,
-  CONSTRAINT `FK_instance_nets_sce_nets` FOREIGN KEY (`sce_net_id`) REFERENCES `sce_nets` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Instances of networks';
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `instance_scenarios`
---
-
-DROP TABLE IF EXISTS `instance_scenarios`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `instance_scenarios` (
-  `uuid` varchar(36) NOT NULL,
-  `name` varchar(255) NOT NULL,
-  `tenant_id` varchar(36) DEFAULT NULL,
-  `scenario_id` varchar(36) DEFAULT NULL,
-  `datacenter_id` varchar(36) NOT NULL,
-  `datacenter_tenant_id` varchar(36) NOT NULL,
-  `description` varchar(255) DEFAULT NULL,
-  `created_at` double NOT NULL,
-  `modified_at` double DEFAULT NULL,
-  `cloud_config` mediumtext,
-  PRIMARY KEY (`uuid`),
-  KEY `FK_scenarios_nfvo_tenants` (`tenant_id`),
-  KEY `FK_instance_scenarios_vim_tenants` (`datacenter_tenant_id`),
-  KEY `FK_instance_scenarios_datacenters` (`datacenter_id`),
-  KEY `FK_instance_scenarios_scenarios` (`scenario_id`),
-  CONSTRAINT `FK_instance_scenarios_datacenter_tenants` FOREIGN KEY (`datacenter_tenant_id`) REFERENCES `datacenter_tenants` (`uuid`),
-  CONSTRAINT `FK_instance_scenarios_datacenters` FOREIGN KEY (`datacenter_id`) REFERENCES `datacenters` (`uuid`),
-  CONSTRAINT `FK_instance_scenarios_nfvo_tenants` FOREIGN KEY (`tenant_id`) REFERENCES `nfvo_tenants` (`uuid`),
-  CONSTRAINT `FK_instance_scenarios_scenarios` FOREIGN KEY (`scenario_id`) REFERENCES `scenarios` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Instances of scenarios defined by the user';
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `instance_sfis`
---
-
-DROP TABLE IF EXISTS `instance_sfis`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `instance_sfis` (
-  `uuid` varchar(36) NOT NULL,
-  `instance_scenario_id` varchar(36) NOT NULL,
-  `vim_sfi_id` varchar(36) DEFAULT NULL,
-  `sce_rsp_hop_id` varchar(36) DEFAULT NULL,
-  `datacenter_id` varchar(36) DEFAULT NULL,
-  `datacenter_tenant_id` varchar(36) DEFAULT NULL,
-  `status` enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
-  `error_msg` varchar(1024) DEFAULT NULL,
-  `vim_info` text,
-  `created_at` double NOT NULL,
-  `modified_at` double DEFAULT NULL,
-  PRIMARY KEY (`uuid`),
-  KEY `FK_instance_sfis_instance_scenarios` (`instance_scenario_id`),
-  KEY `FK_instance_sfis_sce_rsp_hops` (`sce_rsp_hop_id`),
-  KEY `FK_instance_sfis_datacenters` (`datacenter_id`),
-  KEY `FK_instance_sfis_datacenter_tenants` (`datacenter_tenant_id`),
-  CONSTRAINT `FK_instance_sfis_datacenter_tenants` FOREIGN KEY (`datacenter_tenant_id`) REFERENCES `datacenter_tenants` (`uuid`),
-  CONSTRAINT `FK_instance_sfis_datacenters` FOREIGN KEY (`datacenter_id`) REFERENCES `datacenters` (`uuid`),
-  CONSTRAINT `FK_instance_sfis_instance_scenarios` FOREIGN KEY (`instance_scenario_id`) REFERENCES `instance_scenarios` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
-  CONSTRAINT `FK_instance_sfis_sce_rsp_hops` FOREIGN KEY (`sce_rsp_hop_id`) REFERENCES `sce_rsp_hops` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE
-) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `instance_sfps`
---
-
-DROP TABLE IF EXISTS `instance_sfps`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `instance_sfps` (
-  `uuid` varchar(36) NOT NULL,
-  `instance_scenario_id` varchar(36) NOT NULL,
-  `vim_sfp_id` varchar(36) DEFAULT NULL,
-  `sce_rsp_id` varchar(36) DEFAULT NULL,
-  `datacenter_id` varchar(36) DEFAULT NULL,
-  `datacenter_tenant_id` varchar(36) DEFAULT NULL,
-  `status` enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
-  `error_msg` varchar(1024) DEFAULT NULL,
-  `vim_info` text,
-  `created_at` double NOT NULL,
-  `modified_at` double DEFAULT NULL,
-  PRIMARY KEY (`uuid`),
-  KEY `FK_instance_sfps_instance_scenarios` (`instance_scenario_id`),
-  KEY `FK_instance_sfps_sce_rsps` (`sce_rsp_id`),
-  KEY `FK_instance_sfps_datacenters` (`datacenter_id`),
-  KEY `FK_instance_sfps_datacenter_tenants` (`datacenter_tenant_id`),
-  CONSTRAINT `FK_instance_sfps_datacenter_tenants` FOREIGN KEY (`datacenter_tenant_id`) REFERENCES `datacenter_tenants` (`uuid`),
-  CONSTRAINT `FK_instance_sfps_datacenters` FOREIGN KEY (`datacenter_id`) REFERENCES `datacenters` (`uuid`),
-  CONSTRAINT `FK_instance_sfps_instance_scenarios` FOREIGN KEY (`instance_scenario_id`) REFERENCES `instance_scenarios` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
-  CONSTRAINT `FK_instance_sfps_sce_rsps` FOREIGN KEY (`sce_rsp_id`) REFERENCES `sce_rsps` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE
-) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `instance_sfs`
---
-
-DROP TABLE IF EXISTS `instance_sfs`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `instance_sfs` (
-  `uuid` varchar(36) NOT NULL,
-  `instance_scenario_id` varchar(36) NOT NULL,
-  `vim_sf_id` varchar(36) DEFAULT NULL,
-  `sce_rsp_hop_id` varchar(36) DEFAULT NULL,
-  `datacenter_id` varchar(36) DEFAULT NULL,
-  `datacenter_tenant_id` varchar(36) DEFAULT NULL,
-  `status` enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
-  `error_msg` varchar(1024) DEFAULT NULL,
-  `vim_info` text,
-  `created_at` double NOT NULL,
-  `modified_at` double DEFAULT NULL,
-  PRIMARY KEY (`uuid`),
-  KEY `FK_instance_sfs_instance_scenarios` (`instance_scenario_id`),
-  KEY `FK_instance_sfs_sce_rsp_hops` (`sce_rsp_hop_id`),
-  KEY `FK_instance_sfs_datacenters` (`datacenter_id`),
-  KEY `FK_instance_sfs_datacenter_tenants` (`datacenter_tenant_id`),
-  CONSTRAINT `FK_instance_sfs_datacenter_tenants` FOREIGN KEY (`datacenter_tenant_id`) REFERENCES `datacenter_tenants` (`uuid`),
-  CONSTRAINT `FK_instance_sfs_datacenters` FOREIGN KEY (`datacenter_id`) REFERENCES `datacenters` (`uuid`),
-  CONSTRAINT `FK_instance_sfs_instance_scenarios` FOREIGN KEY (`instance_scenario_id`) REFERENCES `instance_scenarios` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
-  CONSTRAINT `FK_instance_sfs_sce_rsp_hops` FOREIGN KEY (`sce_rsp_hop_id`) REFERENCES `sce_rsp_hops` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE
-) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `instance_vms`
---
-
-DROP TABLE IF EXISTS `instance_vms`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `instance_vms` (
-  `uuid` varchar(36) NOT NULL,
-  `instance_vnf_id` varchar(36) NOT NULL,
-  `vm_id` varchar(36) DEFAULT NULL,
-  `vim_vm_id` varchar(128) DEFAULT NULL,
-  `vim_name` varchar(255) DEFAULT NULL,
-  `status` enum('ACTIVE:NoMgmtIP','ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
-  `error_msg` varchar(1024) DEFAULT NULL,
-  `vim_info` text,
-  `created_at` double NOT NULL,
-  `modified_at` double DEFAULT NULL,
-  PRIMARY KEY (`uuid`),
-  UNIQUE KEY `vim_vm_id` (`vim_vm_id`),
-  KEY `FK_instance_vms_vms` (`vm_id`),
-  KEY `FK_instance_vms_instance_vnfs` (`instance_vnf_id`),
-  CONSTRAINT `FK_instance_vms_instance_vnfs` FOREIGN KEY (`instance_vnf_id`) REFERENCES `instance_vnfs` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
-  CONSTRAINT `FK_instance_vms_vms` FOREIGN KEY (`vm_id`) REFERENCES `vms` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Instances of VMs as part of VNF instances';
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `instance_vnfs`
---
-
-DROP TABLE IF EXISTS `instance_vnfs`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `instance_vnfs` (
-  `uuid` varchar(36) NOT NULL,
-  `instance_scenario_id` varchar(36) NOT NULL,
-  `vnf_id` varchar(36) NOT NULL,
-  `sce_vnf_id` varchar(36) DEFAULT NULL,
-  `datacenter_id` varchar(36) DEFAULT NULL,
-  `datacenter_tenant_id` varchar(36) DEFAULT NULL,
-  `created_at` double NOT NULL,
-  `modified_at` double DEFAULT NULL,
-  PRIMARY KEY (`uuid`),
-  KEY `FK_instance_vnfs_vnfs` (`vnf_id`),
-  KEY `FK_instance_vnfs_instance_scenarios` (`instance_scenario_id`),
-  KEY `FK_instance_vnfs_sce_vnfs` (`sce_vnf_id`),
-  KEY `FK_instance_vnfs_datacenters` (`datacenter_id`),
-  KEY `FK_instance_vnfs_datacenter_tenants` (`datacenter_tenant_id`),
-  CONSTRAINT `FK_instance_vnfs_datacenter_tenants` FOREIGN KEY (`datacenter_tenant_id`) REFERENCES `datacenter_tenants` (`uuid`),
-  CONSTRAINT `FK_instance_vnfs_datacenters` FOREIGN KEY (`datacenter_id`) REFERENCES `datacenters` (`uuid`),
-  CONSTRAINT `FK_instance_vnfs_instance_scenarios` FOREIGN KEY (`instance_scenario_id`) REFERENCES `instance_scenarios` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
-  CONSTRAINT `FK_instance_vnfs_sce_vnfs` FOREIGN KEY (`sce_vnf_id`) REFERENCES `sce_vnfs` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE,
-  CONSTRAINT `FK_instance_vnfs_vnfs` FOREIGN KEY (`vnf_id`) REFERENCES `vnfs` (`uuid`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Instances of VNFs as part of a scenario';
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `instance_wim_nets`
---
-
-DROP TABLE IF EXISTS `instance_wim_nets`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `instance_wim_nets` (
-  `uuid` varchar(36) NOT NULL,
-  `wim_internal_id` varchar(128) DEFAULT NULL COMMENT 'Internal ID used by the WIM to refer to the network',
-  `instance_scenario_id` varchar(36) DEFAULT NULL,
-  `sce_net_id` varchar(36) DEFAULT NULL,
-  `wim_id` varchar(36) DEFAULT NULL,
-  `wim_account_id` varchar(36) NOT NULL,
-  `status` enum('ACTIVE','INACTIVE','DOWN','BUILD','ERROR','WIM_ERROR','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
-  `error_msg` varchar(1024) DEFAULT NULL,
-  `wim_info` text,
-  `multipoint` enum('true','false') NOT NULL DEFAULT 'false',
-  `created` enum('true','false') NOT NULL DEFAULT 'false' COMMENT 'Created or already exists at WIM',
-  `created_at` double NOT NULL,
-  `modified_at` double DEFAULT NULL,
-  PRIMARY KEY (`uuid`),
-  KEY `FK_instance_wim_nets_instance_scenarios` (`instance_scenario_id`),
-  KEY `FK_instance_wim_nets_sce_nets` (`sce_net_id`),
-  KEY `FK_instance_wim_nets_wims` (`wim_id`),
-  KEY `FK_instance_wim_nets_wim_accounts` (`wim_account_id`),
-  CONSTRAINT `FK_instance_wim_nets_instance_scenarios` FOREIGN KEY (`instance_scenario_id`) REFERENCES `instance_scenarios` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
-  CONSTRAINT `FK_instance_wim_nets_sce_nets` FOREIGN KEY (`sce_net_id`) REFERENCES `sce_nets` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE,
-  CONSTRAINT `FK_instance_wim_nets_wim_accounts` FOREIGN KEY (`wim_account_id`) REFERENCES `wim_accounts` (`uuid`),
-  CONSTRAINT `FK_instance_wim_nets_wims` FOREIGN KEY (`wim_id`) REFERENCES `wims` (`uuid`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Instances of wim networks';
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `interfaces`
---
-
-DROP TABLE IF EXISTS `interfaces`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `interfaces` (
-  `uuid` varchar(36) NOT NULL,
-  `internal_name` varchar(255) NOT NULL,
-  `external_name` varchar(255) DEFAULT NULL,
-  `vm_id` varchar(36) NOT NULL,
-  `net_id` varchar(36) DEFAULT NULL,
-  `type` enum('mgmt','bridge','data') NOT NULL DEFAULT 'data' COMMENT 'Type of network',
-  `vpci` char(12) DEFAULT NULL,
-  `bw` mediumint(8) unsigned DEFAULT NULL COMMENT 'BW expressed in Mbits/s. Maybe this field is not necessary.',
-  `created_at` double NOT NULL,
-  `modified_at` double DEFAULT NULL,
-  `model` varchar(12) DEFAULT NULL,
-  `mac` char(18) DEFAULT NULL,
-  `ip_address` varchar(64) DEFAULT NULL,
-  `floating_ip` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'Indicates if a floating_ip must be associated to this interface',
-  `port_security` tinyint(1) NOT NULL DEFAULT '1' COMMENT 'Indicates if port security must be enabled or disabled. By default it is enabled',
-  PRIMARY KEY (`uuid`),
-  UNIQUE KEY `internal_name_vm_id` (`internal_name`,`vm_id`),
-  KEY `FK_interfaces_vms` (`vm_id`),
-  KEY `FK_interfaces_nets` (`net_id`),
-  CONSTRAINT `FK_interfaces_nets` FOREIGN KEY (`net_id`) REFERENCES `nets` (`uuid`) ON DELETE CASCADE,
-  CONSTRAINT `FK_interfaces_vms` FOREIGN KEY (`vm_id`) REFERENCES `vms` (`uuid`) ON DELETE CASCADE
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='VM interfaces';
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `ip_profiles`
---
-
-DROP TABLE IF EXISTS `ip_profiles`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `ip_profiles` (
-  `id` int(11) NOT NULL AUTO_INCREMENT,
-  `net_id` varchar(36) DEFAULT NULL,
-  `sce_net_id` varchar(36) DEFAULT NULL,
-  `instance_net_id` varchar(36) DEFAULT NULL,
-  `ip_version` enum('IPv4','IPv6') NOT NULL DEFAULT 'IPv4',
-  `subnet_address` varchar(64) DEFAULT NULL,
-  `gateway_address` varchar(64) DEFAULT NULL,
-  `dns_address` varchar(255) DEFAULT NULL COMMENT 'dns ip list separated by semicolon',
-  `dhcp_enabled` enum('true','false') NOT NULL DEFAULT 'true',
-  `dhcp_start_address` varchar(64) DEFAULT NULL,
-  `dhcp_count` int(11) DEFAULT NULL,
-  `security_group` varchar(255) DEFAULT NULL,
-  PRIMARY KEY (`id`),
-  KEY `FK_ipprofiles_nets` (`net_id`),
-  KEY `FK_ipprofiles_scenets` (`sce_net_id`),
-  KEY `FK_ipprofiles_instancenets` (`instance_net_id`),
-  CONSTRAINT `FK_ipprofiles_instancenets` FOREIGN KEY (`instance_net_id`) REFERENCES `instance_nets` (`uuid`) ON DELETE CASCADE,
-  CONSTRAINT `FK_ipprofiles_nets` FOREIGN KEY (`net_id`) REFERENCES `nets` (`uuid`) ON DELETE CASCADE,
-  CONSTRAINT `FK_ipprofiles_scenets` FOREIGN KEY (`sce_net_id`) REFERENCES `sce_nets` (`uuid`) ON DELETE CASCADE
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Table containing the IP parameters of a network, either a net, a sce_net or and instance_net.';
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `logs`
---
-
-DROP TABLE IF EXISTS `logs`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `logs` (
-  `id` int(10) unsigned NOT NULL AUTO_INCREMENT,
-  `created_at` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
-  `nfvo_tenant_id` varchar(36) DEFAULT NULL,
-  `related` varchar(36) NOT NULL COMMENT 'Relevant element for the log',
-  `uuid` varchar(36) DEFAULT NULL COMMENT 'Uuid of vnf, scenario, etc. that log relates to',
-  `level` enum('panic','error','info','debug','verbose') NOT NULL,
-  `description` varchar(200) NOT NULL,
-  PRIMARY KEY (`id`)
-) ENGINE=InnoDB AUTO_INCREMENT=3423 DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `nets`
---
-
-DROP TABLE IF EXISTS `nets`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `nets` (
-  `uuid` varchar(36) NOT NULL,
-  `osm_id` varchar(255) DEFAULT NULL,
-  `vnf_id` varchar(36) NOT NULL,
-  `name` varchar(255) NOT NULL,
-  `type` enum('bridge','data','ptp') NOT NULL DEFAULT 'data' COMMENT 'Type of network',
-  `multipoint` enum('true','false') NOT NULL DEFAULT 'false',
-  `description` varchar(255) DEFAULT NULL,
-  `created_at` double NOT NULL,
-  `modified_at` double DEFAULT NULL,
-  PRIMARY KEY (`uuid`),
-  UNIQUE KEY `vnf_id_name` (`vnf_id`,`name`),
-  CONSTRAINT `FK_nets_vnfs` FOREIGN KEY (`vnf_id`) REFERENCES `vnfs` (`uuid`) ON DELETE CASCADE
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Networks in a VNF definition. These are only the internal networks among VMs of the same VNF.';
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `nfvo_tenants`
---
-
-DROP TABLE IF EXISTS `nfvo_tenants`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `nfvo_tenants` (
-  `uuid` varchar(36) NOT NULL,
-  `name` varchar(255) NOT NULL,
-  `description` varchar(255) DEFAULT NULL,
-  `encrypted_RO_priv_key` varchar(2000) DEFAULT NULL,
-  `RO_pub_key` varchar(510) DEFAULT NULL,
-  `created_at` double NOT NULL,
-  `modified_at` double DEFAULT NULL,
-  PRIMARY KEY (`uuid`),
-  UNIQUE KEY `name` (`name`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Scenarios defined by the user';
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `sce_classifier_matches`
---
-
-DROP TABLE IF EXISTS `sce_classifier_matches`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `sce_classifier_matches` (
-  `uuid` varchar(36) NOT NULL,
-  `ip_proto` varchar(2) NOT NULL,
-  `source_ip` varchar(16) NOT NULL,
-  `destination_ip` varchar(16) NOT NULL,
-  `source_port` varchar(5) NOT NULL,
-  `destination_port` varchar(5) NOT NULL,
-  `sce_classifier_id` varchar(36) NOT NULL,
-  `created_at` double NOT NULL,
-  `modified_at` double DEFAULT NULL,
-  PRIMARY KEY (`uuid`),
-  KEY `FK_classifiers_classifier_match` (`sce_classifier_id`),
-  CONSTRAINT `FK_sce_classifiers_classifier_match` FOREIGN KEY (`sce_classifier_id`) REFERENCES `sce_classifiers` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
-) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `sce_classifiers`
---
-
-DROP TABLE IF EXISTS `sce_classifiers`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `sce_classifiers` (
-  `uuid` varchar(36) NOT NULL,
-  `tenant_id` varchar(36) DEFAULT NULL,
-  `name` varchar(255) NOT NULL,
-  `sce_vnffg_id` varchar(36) NOT NULL,
-  `sce_rsp_id` varchar(36) NOT NULL,
-  `sce_vnf_id` varchar(36) NOT NULL,
-  `interface_id` varchar(36) NOT NULL,
-  `created_at` double NOT NULL,
-  `modified_at` double DEFAULT NULL,
-  PRIMARY KEY (`uuid`),
-  KEY `FK_sce_vnffgs_classifier` (`sce_vnffg_id`),
-  KEY `FK_sce_rsps_classifier` (`sce_rsp_id`),
-  KEY `FK_sce_vnfs_classifier` (`sce_vnf_id`),
-  KEY `FK_interfaces_classifier` (`interface_id`),
-  CONSTRAINT `FK_interfaces_classifier` FOREIGN KEY (`interface_id`) REFERENCES `interfaces` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
-  CONSTRAINT `FK_sce_rsps_classifier` FOREIGN KEY (`sce_rsp_id`) REFERENCES `sce_rsps` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
-  CONSTRAINT `FK_sce_vnffgs_classifier` FOREIGN KEY (`sce_vnffg_id`) REFERENCES `sce_vnffgs` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
-  CONSTRAINT `FK_sce_vnfs_classifier` FOREIGN KEY (`sce_vnf_id`) REFERENCES `sce_vnfs` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
-) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `sce_interfaces`
---
-
-DROP TABLE IF EXISTS `sce_interfaces`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `sce_interfaces` (
-  `uuid` varchar(36) NOT NULL,
-  `sce_vnf_id` varchar(36) NOT NULL,
-  `sce_net_id` varchar(36) DEFAULT NULL,
-  `interface_id` varchar(36) DEFAULT NULL,
-  `ip_address` varchar(64) DEFAULT NULL,
-  `created_at` double NOT NULL,
-  `modified_at` double DEFAULT NULL,
-  PRIMARY KEY (`uuid`),
-  KEY `FK_sce_interfaces_sce_vnfs` (`sce_vnf_id`),
-  KEY `FK_sce_interfaces_sce_nets` (`sce_net_id`),
-  KEY `FK_sce_interfaces_interfaces` (`interface_id`),
-  CONSTRAINT `FK_sce_interfaces_interfaces` FOREIGN KEY (`interface_id`) REFERENCES `interfaces` (`uuid`),
-  CONSTRAINT `FK_sce_interfaces_sce_nets` FOREIGN KEY (`sce_net_id`) REFERENCES `sce_nets` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
-  CONSTRAINT `FK_sce_interfaces_sce_vnfs` FOREIGN KEY (`sce_vnf_id`) REFERENCES `sce_vnfs` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='VNF interfaces in a scenario definition.';
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `sce_nets`
---
-
-DROP TABLE IF EXISTS `sce_nets`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `sce_nets` (
-  `uuid` varchar(36) NOT NULL,
-  `osm_id` varchar(255) DEFAULT NULL,
-  `name` varchar(255) NOT NULL,
-  `scenario_id` varchar(36) DEFAULT NULL COMMENT 'NULL if net is matched to several scenarios',
-  `type` enum('bridge','data','ptp') NOT NULL DEFAULT 'data' COMMENT 'Type of network',
-  `multipoint` enum('true','false') NOT NULL DEFAULT 'true',
-  `external` enum('true','false') NOT NULL DEFAULT 'false' COMMENT 'If external, net is already present at VIM',
-  `description` varchar(255) DEFAULT NULL,
-  `vim_network_name` varchar(255) DEFAULT NULL,
-  `created_at` double NOT NULL,
-  `modified_at` double DEFAULT NULL,
-  `graph` varchar(2000) DEFAULT NULL,
-  PRIMARY KEY (`uuid`),
-  KEY `FK_sce_nets_scenarios` (`scenario_id`),
-  CONSTRAINT `FK_sce_nets_scenarios` FOREIGN KEY (`scenario_id`) REFERENCES `scenarios` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Networks in a scenario definition. It only considers networks among VNFs. Networks among internal VMs are only considered in tble ''nets''.';
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `sce_rsp_hops`
---
-
-DROP TABLE IF EXISTS `sce_rsp_hops`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `sce_rsp_hops` (
-  `uuid` varchar(36) NOT NULL,
-  `if_order` int(11) NOT NULL DEFAULT '0',
-  `ingress_interface_id` varchar(36) NOT NULL,
-  `egress_interface_id` varchar(36) NOT NULL,
-  `sce_vnf_id` varchar(36) NOT NULL,
-  `sce_rsp_id` varchar(36) NOT NULL,
-  `created_at` double NOT NULL,
-  `modified_at` double DEFAULT NULL,
-  PRIMARY KEY (`uuid`),
-  KEY `FK_interfaces_rsp_hop` (`ingress_interface_id`),
-  KEY `FK_sce_vnfs_rsp_hop` (`sce_vnf_id`),
-  KEY `FK_sce_rsps_rsp_hop` (`sce_rsp_id`),
-  KEY `FK_interfaces_rsp_hop_egress` (`egress_interface_id`),
-  CONSTRAINT `FK_interfaces_rsp_hop_egress` FOREIGN KEY (`egress_interface_id`) REFERENCES `interfaces` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
-  CONSTRAINT `FK_interfaces_rsp_hop_ingress` FOREIGN KEY (`ingress_interface_id`) REFERENCES `interfaces` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
-  CONSTRAINT `FK_sce_rsps_rsp_hop` FOREIGN KEY (`sce_rsp_id`) REFERENCES `sce_rsps` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
-  CONSTRAINT `FK_sce_vnfs_rsp_hop` FOREIGN KEY (`sce_vnf_id`) REFERENCES `sce_vnfs` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
-) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `sce_rsps`
---
-
-DROP TABLE IF EXISTS `sce_rsps`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `sce_rsps` (
-  `uuid` varchar(36) NOT NULL,
-  `tenant_id` varchar(36) DEFAULT NULL,
-  `name` varchar(255) NOT NULL,
-  `sce_vnffg_id` varchar(36) NOT NULL,
-  `created_at` double NOT NULL,
-  `modified_at` double DEFAULT NULL,
-  PRIMARY KEY (`uuid`),
-  KEY `FK_sce_vnffgs_rsp` (`sce_vnffg_id`),
-  CONSTRAINT `FK_sce_vnffgs_rsp` FOREIGN KEY (`sce_vnffg_id`) REFERENCES `sce_vnffgs` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
-) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `sce_vnffgs`
---
-
-DROP TABLE IF EXISTS `sce_vnffgs`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `sce_vnffgs` (
-  `uuid` varchar(36) NOT NULL,
-  `tenant_id` varchar(36) DEFAULT NULL,
-  `name` varchar(255) NOT NULL,
-  `description` varchar(255) DEFAULT NULL,
-  `vendor` varchar(255) DEFAULT NULL,
-  `scenario_id` varchar(36) NOT NULL,
-  `created_at` double NOT NULL,
-  `modified_at` double DEFAULT NULL,
-  PRIMARY KEY (`uuid`),
-  KEY `FK_scenarios_sce_vnffg` (`scenario_id`),
-  KEY `FK_scenarios_vnffg` (`tenant_id`),
-  CONSTRAINT `FK_scenarios_vnffg` FOREIGN KEY (`tenant_id`) REFERENCES `scenarios` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
-) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `sce_vnfs`
---
-
-DROP TABLE IF EXISTS `sce_vnfs`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `sce_vnfs` (
-  `uuid` varchar(36) NOT NULL,
-  `member_vnf_index` varchar(255) DEFAULT NULL,
-  `name` varchar(255) NOT NULL,
-  `scenario_id` varchar(36) NOT NULL,
-  `vnf_id` varchar(36) NOT NULL,
-  `description` varchar(255) DEFAULT NULL,
-  `created_at` double NOT NULL,
-  `modified_at` double DEFAULT NULL,
-  `graph` varchar(2000) DEFAULT NULL,
-  PRIMARY KEY (`uuid`),
-  UNIQUE KEY `name_scenario_id` (`name`,`scenario_id`),
-  KEY `FK_sce_vnfs_scenarios` (`scenario_id`),
-  KEY `FK_sce_vnfs_vnfs` (`vnf_id`),
-  CONSTRAINT `FK_sce_vnfs_scenarios` FOREIGN KEY (`scenario_id`) REFERENCES `scenarios` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
-  CONSTRAINT `FK_sce_vnfs_vnfs` FOREIGN KEY (`vnf_id`) REFERENCES `vnfs` (`uuid`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='VNFs in scenario definitions. This table also contains the Physical Network Functions and the external elements such as MAN, Core, etc.\r\n';
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `scenarios`
---
-
-DROP TABLE IF EXISTS `scenarios`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `scenarios` (
-  `uuid` varchar(36) NOT NULL,
-  `osm_id` varchar(255) DEFAULT NULL,
-  `name` varchar(255) NOT NULL,
-  `short_name` varchar(255) DEFAULT NULL,
-  `tenant_id` varchar(36) DEFAULT NULL,
-  `description` varchar(255) DEFAULT NULL,
-  `vendor` varchar(255) DEFAULT NULL,
-  `public` enum('true','false') NOT NULL DEFAULT 'false',
-  `created_at` double NOT NULL,
-  `modified_at` double DEFAULT NULL,
-  `descriptor` text COMMENT 'Original text descriptor used for create the scenario',
-  `cloud_config` mediumtext,
-  PRIMARY KEY (`uuid`),
-  UNIQUE KEY `osm_id_tenant_id` (`osm_id`,`tenant_id`),
-  KEY `FK_scenarios_nfvo_tenants` (`tenant_id`),
-  CONSTRAINT `FK_scenarios_nfvo_tenants` FOREIGN KEY (`tenant_id`) REFERENCES `nfvo_tenants` (`uuid`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Scenarios defined by the user';
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `schema_version`
---
-
-DROP TABLE IF EXISTS `schema_version`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `schema_version` (
-  `version_int` int(11) NOT NULL COMMENT 'version as a number. Must not contain gaps',
-  `version` varchar(20) NOT NULL COMMENT 'version as a text',
-  `openmano_ver` varchar(20) NOT NULL COMMENT 'openmano version',
-  `comments` varchar(2000) DEFAULT NULL COMMENT 'changes to database',
-  `date` date DEFAULT NULL,
-  PRIMARY KEY (`version_int`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='database schema control version';
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `tenants_datacenters`
---
-
-DROP TABLE IF EXISTS `tenants_datacenters`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `tenants_datacenters` (
-  `id` int(11) NOT NULL AUTO_INCREMENT,
-  `nfvo_tenant_id` varchar(36) NOT NULL,
-  `datacenter_id` varchar(36) NOT NULL,
-  `datacenter_tenant_id` varchar(36) NOT NULL,
-  `created_at` double NOT NULL,
-  `modified_at` double DEFAULT NULL,
-  PRIMARY KEY (`id`),
-  UNIQUE KEY `datacenter_nfvo_tenant` (`datacenter_id`,`nfvo_tenant_id`),
-  KEY `FK_nfvo_tenants_datacenters` (`datacenter_id`),
-  KEY `FK_nfvo_tenants_vim_tenants` (`datacenter_tenant_id`),
-  KEY `FK_tenants_datacenters_nfvo_tenants` (`nfvo_tenant_id`),
-  CONSTRAINT `FK_tenants_datacenters_datacenter_tenants` FOREIGN KEY (`datacenter_tenant_id`) REFERENCES `datacenter_tenants` (`uuid`),
-  CONSTRAINT `FK_tenants_datacenters_datacenters` FOREIGN KEY (`datacenter_id`) REFERENCES `datacenters` (`uuid`),
-  CONSTRAINT `FK_tenants_datacenters_nfvo_tenants` FOREIGN KEY (`nfvo_tenant_id`) REFERENCES `nfvo_tenants` (`uuid`)
-) ENGINE=InnoDB AUTO_INCREMENT=86 DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='Scenarios defined by the user';
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `uuids`
---
-
-DROP TABLE IF EXISTS `uuids`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `uuids` (
-  `uuid` varchar(36) NOT NULL,
-  `root_uuid` varchar(36) DEFAULT NULL COMMENT 'Some related UUIDs can be grouped by this field, so that they can be deleted at once',
-  `created_at` double NOT NULL,
-  `used_at` varchar(36) DEFAULT NULL COMMENT 'Table that uses this UUID',
-  PRIMARY KEY (`uuid`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Table with all unique IDs used to avoid UUID repetitions among different elements';
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `vim_wim_actions`
---
-
-DROP TABLE IF EXISTS `vim_wim_actions`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `vim_wim_actions` (
-  `instance_action_id` varchar(36) NOT NULL,
-  `task_index` int(6) NOT NULL,
-  `datacenter_vim_id` varchar(36) DEFAULT NULL,
-  `vim_id` varchar(64) DEFAULT NULL,
-  `wim_account_id` varchar(36) DEFAULT NULL,
-  `wim_internal_id` varchar(64) DEFAULT NULL,
-  `action` varchar(36) NOT NULL COMMENT 'CREATE,DELETE,START,STOP...',
-  `item` enum('datacenters_flavors','datacenter_images','instance_nets','instance_vms','instance_interfaces','instance_wim_nets') NOT NULL COMMENT 'table where the item is stored',
-  `item_id` varchar(36) DEFAULT NULL COMMENT 'uuid of the entry in the table',
-  `status` enum('SCHEDULED','BUILD','DONE','FAILED','SUPERSEDED') NOT NULL DEFAULT 'SCHEDULED',
-  `extra` text COMMENT 'json with params:, depends_on: for the task',
-  `error_msg` varchar(1024) DEFAULT NULL,
-  `created_at` double NOT NULL,
-  `modified_at` double DEFAULT NULL,
-  PRIMARY KEY (`task_index`,`instance_action_id`),
-  KEY `FK_actions_instance_actions` (`instance_action_id`),
-  KEY `FK_actions_vims` (`datacenter_vim_id`),
-  KEY `item_type_id` (`item`,`item_id`),
-  KEY `FK_actions_wims` (`wim_account_id`),
-  CONSTRAINT `FK_actions_instance_actions` FOREIGN KEY (`instance_action_id`) REFERENCES `instance_actions` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
-  CONSTRAINT `FK_actions_vims` FOREIGN KEY (`datacenter_vim_id`) REFERENCES `datacenter_tenants` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
-  CONSTRAINT `FK_actions_wims` FOREIGN KEY (`wim_account_id`) REFERENCES `wim_accounts` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Table with the individual VIM actions.';
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `vms`
---
-
-DROP TABLE IF EXISTS `vms`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `vms` (
-  `uuid` varchar(36) NOT NULL,
-  `osm_id` varchar(255) DEFAULT NULL,
-  `pdu_type` varchar(255) DEFAULT NULL,
-  `name` varchar(255) NOT NULL,
-  `vnf_id` varchar(36) NOT NULL,
-  `count` smallint(6) NOT NULL DEFAULT '1',
-  `flavor_id` varchar(36) NOT NULL COMMENT 'Link to flavor table',
-  `image_id` varchar(36) DEFAULT NULL COMMENT 'Link to image table',
-  `image_list` text COMMENT 'Alternative images',
-  `image_path` varchar(100) DEFAULT NULL COMMENT 'Path where the image of the VM is located',
-  `boot_data` text,
-  `description` varchar(255) DEFAULT NULL,
-  `created_at` double NOT NULL,
-  `modified_at` double DEFAULT NULL,
-  `availability_zone` varchar(255) DEFAULT NULL,
-  PRIMARY KEY (`uuid`),
-  UNIQUE KEY `name_vnf_id` (`name`,`vnf_id`),
-  KEY `FK_vms_vnfs` (`vnf_id`),
-  KEY `FK_vms_images` (`image_id`),
-  KEY `FK_vms_flavors` (`flavor_id`),
-  CONSTRAINT `FK_vms_flavors` FOREIGN KEY (`flavor_id`) REFERENCES `flavors` (`uuid`),
-  CONSTRAINT `FK_vms_images` FOREIGN KEY (`image_id`) REFERENCES `images` (`uuid`),
-  CONSTRAINT `FK_vms_vnfs` FOREIGN KEY (`vnf_id`) REFERENCES `vnfs` (`uuid`) ON DELETE CASCADE
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='VM definitions. It contains the set of VMs used by the VNF definitions.';
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `vnfs`
---
-
-DROP TABLE IF EXISTS `vnfs`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `vnfs` (
-  `uuid` varchar(36) NOT NULL,
-  `osm_id` varchar(255) DEFAULT NULL,
-  `name` varchar(255) NOT NULL,
-  `short_name` varchar(255) DEFAULT NULL,
-  `tenant_id` varchar(36) DEFAULT NULL,
-  `physical` enum('true','false') NOT NULL DEFAULT 'false',
-  `public` enum('true','false') NOT NULL DEFAULT 'false',
-  `description` varchar(255) DEFAULT NULL,
-  `vendor` varchar(255) DEFAULT NULL,
-  `mgmt_access` varchar(2000) DEFAULT NULL,
-  `created_at` double NOT NULL,
-  `modified_at` double DEFAULT NULL,
-  `class` varchar(36) DEFAULT 'MISC',
-  `descriptor` text COMMENT 'Original text descriptor used for create the VNF',
-  PRIMARY KEY (`uuid`),
-  UNIQUE KEY `osm_id_tenant_id` (`osm_id`,`tenant_id`),
-  KEY `FK_vnfs_nfvo_tenants` (`tenant_id`),
-  CONSTRAINT `FK_vnfs_nfvo_tenants` FOREIGN KEY (`tenant_id`) REFERENCES `nfvo_tenants` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='VNF definitions. This is the catalogue of VNFs. It also includes Physical Network Functions or Physical Elements.\r\n';
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `wim_accounts`
---
-
-DROP TABLE IF EXISTS `wim_accounts`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `wim_accounts` (
-  `uuid` varchar(36) NOT NULL,
-  `name` varchar(255) DEFAULT NULL,
-  `wim_id` varchar(36) NOT NULL,
-  `created` enum('true','false') NOT NULL DEFAULT 'false',
-  `user` varchar(64) DEFAULT NULL,
-  `password` varchar(64) DEFAULT NULL,
-  `config` text,
-  `created_at` double NOT NULL,
-  `modified_at` double DEFAULT NULL,
-  PRIMARY KEY (`uuid`),
-  UNIQUE KEY `wim_name` (`wim_id`,`name`),
-  KEY `FK_wim_accounts_wims` (`wim_id`),
-  CONSTRAINT `FK_wim_accounts_wims` FOREIGN KEY (`wim_id`) REFERENCES `wims` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='WIM accounts by the user';
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `wim_nfvo_tenants`
---
-
-DROP TABLE IF EXISTS `wim_nfvo_tenants`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `wim_nfvo_tenants` (
-  `id` int(11) NOT NULL AUTO_INCREMENT,
-  `nfvo_tenant_id` varchar(36) NOT NULL,
-  `wim_id` varchar(36) NOT NULL,
-  `wim_account_id` varchar(36) NOT NULL,
-  `created_at` double NOT NULL,
-  `modified_at` double DEFAULT NULL,
-  PRIMARY KEY (`id`),
-  UNIQUE KEY `wim_nfvo_tenant` (`wim_id`,`nfvo_tenant_id`),
-  KEY `FK_wims_nfvo_tenants` (`wim_id`),
-  KEY `FK_wim_accounts_nfvo_tenants` (`wim_account_id`),
-  KEY `FK_nfvo_tenants_wim_accounts` (`nfvo_tenant_id`),
-  CONSTRAINT `FK_nfvo_tenants_wim_accounts` FOREIGN KEY (`nfvo_tenant_id`) REFERENCES `nfvo_tenants` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
-  CONSTRAINT `FK_wim_accounts_nfvo_tenants` FOREIGN KEY (`wim_account_id`) REFERENCES `wim_accounts` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
-  CONSTRAINT `FK_wims_nfvo_tenants` FOREIGN KEY (`wim_id`) REFERENCES `wims` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
-) ENGINE=InnoDB AUTO_INCREMENT=86 DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='WIM accounts mapping to NFVO tenants';
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `wim_port_mappings`
---
-
-DROP TABLE IF EXISTS `wim_port_mappings`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `wim_port_mappings` (
-  `id` int(11) NOT NULL AUTO_INCREMENT,
-  `wim_id` varchar(36) NOT NULL,
-  `datacenter_id` varchar(36) NOT NULL,
-  `pop_switch_dpid` varchar(64) NOT NULL,
-  `pop_switch_port` varchar(64) NOT NULL,
-  `wan_service_endpoint_id` varchar(256) NOT NULL COMMENT 'this field contains a unique identifier used to check the mapping_info consistency',
-  `wan_service_mapping_info` text,
-  `created_at` double NOT NULL,
-  `modified_at` double DEFAULT NULL,
-  PRIMARY KEY (`id`),
-  UNIQUE KEY `unique_datacenter_port_mapping` (`datacenter_id`,`pop_switch_dpid`,`pop_switch_port`),
-  UNIQUE KEY `unique_wim_port_mapping` (`wim_id`,`wan_service_endpoint_id`),
-  KEY `FK_wims_wim_physical_connections` (`wim_id`),
-  KEY `FK_datacenters_wim_port_mappings` (`datacenter_id`),
-  CONSTRAINT `FK_datacenters_wim_port_mappings` FOREIGN KEY (`datacenter_id`) REFERENCES `datacenters` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
-  CONSTRAINT `FK_wims_wim_port_mappings` FOREIGN KEY (`wim_id`) REFERENCES `wims` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='WIM port mappings managed by the WIM.';
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `wims`
---
-
-DROP TABLE IF EXISTS `wims`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `wims` (
-  `uuid` varchar(36) NOT NULL,
-  `name` varchar(255) NOT NULL,
-  `description` varchar(255) DEFAULT NULL,
-  `type` varchar(36) NOT NULL DEFAULT 'odl',
-  `wim_url` varchar(150) NOT NULL,
-  `config` text,
-  `created_at` double NOT NULL,
-  `modified_at` double DEFAULT NULL,
-  PRIMARY KEY (`uuid`),
-  UNIQUE KEY `name` (`name`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT COMMENT='WIMs managed by the NFVO.';
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping routines for database 'mano_db'
---
-/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
-
-/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
-/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
-/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
-/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
-/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
-/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
-/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
-
--- Dump completed on 2018-12-10  9:58:03
-
-
-
-
-
--- MySQL dump 10.13  Distrib 5.7.24, for Linux (x86_64)
---
--- Host: localhost    Database: {{mano_db}}
--- ------------------------------------------------------
--- Server version      5.7.24
-
-/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
-/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
-/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
-/*!40101 SET NAMES utf8 */;
-/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
-/*!40103 SET TIME_ZONE='+00:00' */;
-/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
-/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
-/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
-/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
-
---
--- Dumping data for table `schema_version`
---
-
-LOCK TABLES `schema_version` WRITE;
-/*!40000 ALTER TABLE `schema_version` DISABLE KEYS */;
-INSERT INTO `schema_version` VALUES
-(0,'0.0','0.0.0','Database in init process','2015-05-08'),
-(1,'0.1','0.2.2','insert schema_version','2015-05-08'),
-(2,'0.2','0.2.5','new tables images,flavors','2015-07-13'),
-(3,'0.3','0.3.3','alter vim_tenant tables','2015-07-28'),
-(4,'0.4','0.3.5','enlarge graph field at sce_vnfs/nets','2015-10-20'),
-(5,'0.5','0.4.1','Add mac address for bridge interfaces','2015-12-14'),
-(6,'0.6','0.4.2','Adding VIM status info','2015-12-22'),
-(7,'0.7','0.4.3','Changing created_at time at database','2016-01-25'),
-(8,'0.8','0.4.32','Enlarging name at database','2016-02-01'),
-(9,'0.9','0.4.33','Add ACTIVE:NoMgmtIP to instance_vms table','2016-02-05'),
-(10,'0.10','0.4.36','tenant management of vnfs,scenarios','2016-03-08'),
-(11,'0.11','0.4.43','remove unique name at scenarios,instance_scenarios','2016-07-18'),
-(12,'0.12','0.4.46','create ip_profiles table, with foreign keys to all nets tables, and add ip_address column to interfaces and sce_interfaces','2016-08-29'),
-(13,'0.13','0.4.47','insert cloud-config at scenarios,instance_scenarios','2016-08-30'),
-(14,'0.14','0.4.57','remove unique index vim_net_id, instance_scenario_id','2016-09-26'),
-(15,'0.15','0.4.59','add columns universal_name and checksum at table images, add unique index universal_name_checksum, and change location to allow NULL; change column image_path in table vms to allow NULL','2016-09-27'),
-(16,'0.16','0.5.2','enlarge vim_tenant_name and id. New config at datacenter_tenants','2016-10-11'),
-(17,'0.17','0.5.3','Extra description json format of additional devices in datacenter_flavors','2016-12-20'),
-(18,'0.18','0.5.4','Add columns \'floating_ip\' and \'port_security\' at tables \'interfaces\' and \'instance_interfaces\'','2017-01-09'),
-(19,'0.19','0.5.5','Extra Boot-data content at VNFC (vms)','2017-01-11'),
-(20,'0.20','0.5.9','Added columns to store dataplane connectivity info','2017-03-13'),
-(21,'0.21','0.5.15','Edit instance_nets to allow instance_scenario_id=None and enlarge column dns_address at table ip_profiles','2017-06-02'),
-(22,'0.22','0.5.16','Changed type of ram in flavors from SMALLINT to MEDIUMINT','2017-06-02'),
-(23,'0.23','0.5.20','Changed type of ram in flavors from SMALLINT to MEDIUMINT','2017-08-29'),
-(24,'0.24','0.5.21','Added vnfd fields','2017-08-29'),
-(25,'0.25','0.5.22','Added osm_id to vnfs,scenarios','2017-09-01'),
-(26,'0.26','0.5.23','Several changes','2017-09-09'),
-(27,'0.27','0.5.25','Added encrypted_RO_priv_key,RO_pub_key to table nfvo_tenants','2017-09-29'),
-(28,'0.28','0.5.28','Adding VNFFG-related tables','2017-11-20'),
-(29,'0.29','0.5.59','Change member_vnf_index to str accordingly to the model','2018-04-11'),
-(30,'0.30','0.5.60','Add image_list to vms','2018-04-24'),
-(31,'0.31','0.5.61','Add vim_network_name to sce_nets','2018-05-03'),
-(32,'0.32','0.5.70','Add vim_name to instance vms','2018-06-28'),
-(33,'0.33','0.5.82','Add pdu information to vms','2018-11-13'),
-(34,'0.34','0.6.00','Added WIM tables','2018-09-10'),
-(35,'0.35','0.6.02','Adding ingress and egress ports for RSPs','2018-12-11'),
-(36,'0.36','0.6.03','Allow vm without image_id for PDUs','2018-12-19');
-/*!40000 ALTER TABLE `schema_version` ENABLE KEYS */;
-UNLOCK TABLES;
-/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
-
-/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
-/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
-/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
-/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
-/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
-/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
-/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
-
--- Dump completed on 2018-12-10  9:58:03
diff --git a/database_utils/migrate_mano_db.sh b/database_utils/migrate_mano_db.sh
deleted file mode 100755 (executable)
index 096a21a..0000000
+++ /dev/null
@@ -1,1581 +0,0 @@
-#!/bin/bash
-
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-#
-#Upgrade/Downgrade openmano database preserving the content
-#
-DBUTILS="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-
-DBUSER="mano"
-DBPASS=""
-DEFAULT_DBPASS="manopw"
-DBHOST=""
-DBPORT="3306"
-DBNAME="mano_db"
-QUIET_MODE=""
-BACKUP_DIR=""
-BACKUP_FILE=""
-#TODO update it with the last database version
-LAST_DB_VERSION=39
-
-# Detect paths
-MYSQL=$(which mysql)
-AWK=$(which awk)
-GREP=$(which grep)
-
-function usage(){
-    echo -e "Usage: $0 OPTIONS [version]"
-    echo -e "  Upgrades/Downgrades openmano database preserving the content."\
-            "If [version]  is not provided, it is upgraded to the last version"
-    echo -e "  OPTIONS"
-    echo -e "     -u USER  database user. '$DBUSER' by default. Prompts if DB access fails"
-    echo -e "     -p PASS  database password. If missing it tries without and '$DEFAULT_DBPASS' password before prompting"
-    echo -e "     -P PORT  database port. '$DBPORT' by default"
-    echo -e "     -h HOST  database host. 'localhost' by default"
-    echo -e "     -d NAME  database name. '$DBNAME' by default.  Prompts if DB access fails"
-    echo -e "     -b DIR   backup folder where to create rollback backup file"
-    echo -e "     -q --quiet: Do not prompt for credentials and exit if cannot access to database"
-    echo -e "     --help   shows this help"
-}
-
-while getopts ":u:p:b:P:h:d:q-:" o; do
-    case "${o}" in
-        u)
-            DBUSER="$OPTARG"
-            ;;
-        p)
-            DBPASS="$OPTARG"
-            ;;
-        P)
-            DBPORT="$OPTARG"
-            ;;
-        d)
-            DBNAME="$OPTARG"
-            ;;
-        h)
-            DBHOST="$OPTARG"
-            ;;
-        b)
-            BACKUP_DIR="$OPTARG"
-            ;;
-        q)
-            export QUIET_MODE=yes
-            ;;
-        -)
-            [ "${OPTARG}" == "help" ] && usage && exit 0
-            [ "${OPTARG}" == "quiet" ] && export QUIET_MODE=yes && continue
-            echo "Invalid option: '--$OPTARG'. Type --help for more information" >&2
-            exit 1
-            ;;
-        \?)
-            echo "Invalid option: '-$OPTARG'. Type --help for more information" >&2
-            exit 1
-            ;;
-        :)
-            echo "Option '-$OPTARG' requires an argument. Type --help for more information" >&2
-            exit 1
-            ;;
-        *)
-            usage >&2
-            exit 1
-            ;;
-    esac
-done
-shift $((OPTIND-1))
-
-DB_VERSION=$1
-
-if [ -n "$DB_VERSION" ] ; then
-    # check it is a number and an allowed one
-    [ "$DB_VERSION" -eq "$DB_VERSION" ] 2>/dev/null || 
-        ! echo "parameter 'version' requires a integer value" >&2 || exit 1
-    if [ "$DB_VERSION" -lt 0 ] || [ "$DB_VERSION" -gt "$LAST_DB_VERSION" ] ; then
-        echo "parameter 'version' requires a valid database version between '0' and '$LAST_DB_VERSION'"\
-             "If you need an upper version, get a newer version of this script '$0'" >&2
-        exit 1
-    fi
-else
-    DB_VERSION="$LAST_DB_VERSION"
-fi
-
-# Creating temporary file
-TEMPFILE="$(mktemp -q --tmpdir "migratemanodb.XXXXXX")"
-trap 'rm -f "$TEMPFILE"' EXIT
-chmod 0600 "$TEMPFILE"
-DEF_EXTRA_FILE_PARAM="--defaults-extra-file=$TEMPFILE"
-echo -e "[client]\n user='${DBUSER}'\n password='$DBPASS'\n host='$DBHOST'\n port='$DBPORT'" > "$TEMPFILE"
-
-# Check and ask for database user password
-FIRST_TRY="yes"
-while ! DB_ERROR=`mysql "$DEF_EXTRA_FILE_PARAM" $DBNAME -e "quit" 2>&1 >/dev/null`
-do
-    # if password is not provided, try silently with $DEFAULT_DBPASS before exit or prompt for credentials
-    [[ -n "$FIRST_TRY" ]] && [[ -z "$DBPASS" ]] && DBPASS="$DEFAULT_DBPASS" &&
-        echo -e "[client]\n user='${DBUSER}'\n password='$DBPASS'\n host='$DBHOST'\n port='$DBPORT'" > "$TEMPFILE" &&
-        continue
-    echo "$DB_ERROR"
-    [[ -n "$QUIET_MODE" ]] && echo -e "Invalid database credentials!!!" >&2 && exit 1
-    echo -e "Provide database name and credentials (Ctrl+c to abort):"
-    read -e -p "    mysql database name($DBNAME): " KK
-    [ -n "$KK" ] && DBNAME="$KK"
-    read -e -p "    mysql user($DBUSER): " KK
-    [ -n "$KK" ] && DBUSER="$KK"
-    read -e -s -p "    mysql password: " DBPASS
-    echo -e "[client]\n user='${DBUSER}'\n password='$DBPASS'\n host='$DBHOST'\n port='$DBPORT'" > "$TEMPFILE"
-    FIRST_TRY=""
-    echo
-done
-
-DBCMD="mysql $DEF_EXTRA_FILE_PARAM $DBNAME"
-#echo DBCMD $DBCMD
-
-#check that the database seems a openmano database
-if ! echo -e "show create table vnfs;\nshow create table scenarios" | $DBCMD >/dev/null 2>&1
-then
-    echo "    database $DBNAME does not seem to be an openmano database" >&2
-    exit 1;
-fi
-
-#GET DATABASE TARGET VERSION
-#DB_VERSION=0
-#[ $OPENMANO_VER_NUM -ge 2002 ] && DB_VERSION=1   #0.2.2 =>  1
-#[ $OPENMANO_VER_NUM -ge 2005 ] && DB_VERSION=2   #0.2.5 =>  2
-#[ $OPENMANO_VER_NUM -ge 3003 ] && DB_VERSION=3   #0.3.3 =>  3
-#[ $OPENMANO_VER_NUM -ge 3005 ] && DB_VERSION=4   #0.3.5 =>  4
-#[ $OPENMANO_VER_NUM -ge 4001 ] && DB_VERSION=5   #0.4.1 =>  5
-#[ $OPENMANO_VER_NUM -ge 4002 ] && DB_VERSION=6   #0.4.2 =>  6
-#[ $OPENMANO_VER_NUM -ge 4003 ] && DB_VERSION=7   #0.4.3 =>  7
-#[ $OPENMANO_VER_NUM -ge 4032 ] && DB_VERSION=8   #0.4.32=>  8
-#[ $OPENMANO_VER_NUM -ge 4033 ] && DB_VERSION=9   #0.4.33=>  9
-#[ $OPENMANO_VER_NUM -ge 4036 ] && DB_VERSION=10  #0.4.36=>  10
-#[ $OPENMANO_VER_NUM -ge 4043 ] && DB_VERSION=11  #0.4.43=>  11
-#[ $OPENMANO_VER_NUM -ge 4046 ] && DB_VERSION=12  #0.4.46=>  12
-#[ $OPENMANO_VER_NUM -ge 4047 ] && DB_VERSION=13  #0.4.47=>  13
-#[ $OPENMANO_VER_NUM -ge 4057 ] && DB_VERSION=14  #0.4.57=>  14
-#[ $OPENMANO_VER_NUM -ge 4059 ] && DB_VERSION=15  #0.4.59=>  15
-#[ $OPENMANO_VER_NUM -ge 5002 ] && DB_VERSION=16  #0.5.2 =>  16
-#[ $OPENMANO_VER_NUM -ge 5003 ] && DB_VERSION=17  #0.5.3 =>  17
-#[ $OPENMANO_VER_NUM -ge 5004 ] && DB_VERSION=18  #0.5.4 =>  18
-#[ $OPENMANO_VER_NUM -ge 5005 ] && DB_VERSION=19  #0.5.5 =>  19
-#[ $OPENMANO_VER_NUM -ge 5009 ] && DB_VERSION=20  #0.5.9 =>  20
-#[ $OPENMANO_VER_NUM -ge 5015 ] && DB_VERSION=21  #0.5.15 =>  21
-#[ $OPENMANO_VER_NUM -ge 5016 ] && DB_VERSION=22  #0.5.16 =>  22
-#[ $OPENMANO_VER_NUM -ge 5020 ] && DB_VERSION=23  #0.5.20 =>  23
-#[ $OPENMANO_VER_NUM -ge 5021 ] && DB_VERSION=24  #0.5.21 =>  24
-#[ $OPENMANO_VER_NUM -ge 5022 ] && DB_VERSION=25  #0.5.22 =>  25
-#[ $OPENMANO_VER_NUM -ge 5024 ] && DB_VERSION=26  #0.5.24 =>  26
-#[ $OPENMANO_VER_NUM -ge 5025 ] && DB_VERSION=27  #0.5.25 =>  27
-#[ $OPENMANO_VER_NUM -ge 5052 ] && DB_VERSION=28  #0.5.52 =>  28
-#[ $OPENMANO_VER_NUM -ge 5059 ] && DB_VERSION=29  #0.5.59 =>  29
-#[ $OPENMANO_VER_NUM -ge 5060 ] && DB_VERSION=30  #0.5.60 =>  30
-#[ $OPENMANO_VER_NUM -ge 5061 ] && DB_VERSION=31  #0.5.61 =>  31
-#[ $OPENMANO_VER_NUM -ge 5070 ] && DB_VERSION=32  #0.5.70 =>  32
-#[ $OPENMANO_VER_NUM -ge 5082 ] && DB_VERSION=33  #0.5.82 =>  33
-#[ $OPENMANO_VER_NUM -ge 6000 ] && DB_VERSION=34  #0.6.00 =>  34
-#[ $OPENMANO_VER_NUM -ge 6001 ] && DB_VERSION=35  #0.6.01 =>  35
-#[ $OPENMANO_VER_NUM -ge 6003 ] && DB_VERSION=36  #0.6.03 =>  36
-#[ $OPENMANO_VER_NUM -ge 6009 ] && DB_VERSION=37  #0.6.09 =>  37
-#[ $OPENMANO_VER_NUM -ge 6011 ] && DB_VERSION=38  #0.6.11 =>  38
-#[ $OPENMANO_VER_NUM -ge 6020 ] && DB_VERSION=39  #0.6.20 =>  39
-#TODO ... put next versions here
-
-function upgrade_to_1(){
-    # echo "    upgrade database from version 0.0 to version 0.1"
-    echo "      CREATE TABLE \`schema_version\`"
-    sql "CREATE TABLE \`schema_version\` (
-       \`version_int\` INT NOT NULL COMMENT 'version as a number. Must not contain gaps',
-       \`version\` VARCHAR(20) NOT NULL COMMENT 'version as a text',
-       \`openmano_ver\` VARCHAR(20) NOT NULL COMMENT 'openmano version',
-       \`comments\` VARCHAR(2000) NULL COMMENT 'changes to database',
-       \`date\` DATE NULL,
-       PRIMARY KEY (\`version_int\`)
-       )
-       COMMENT='database schema control version'
-       COLLATE='utf8_general_ci'
-       ENGINE=InnoDB;"
-    sql "INSERT INTO \`schema_version\` (\`version_int\`, \`version\`, \`openmano_ver\`, \`comments\`, \`date\`)
-        VALUES (1, '0.1', '0.2.2', 'insert schema_version', '2015-05-08');"
-}
-function downgrade_from_1(){
-    # echo "    downgrade database from version 0.1 to version 0.0"
-    echo "      DROP TABLE IF EXISTS \`schema_version\`"
-    sql "DROP TABLE IF EXISTS \`schema_version\`;"
-}
-function upgrade_to_2(){
-    # echo "    upgrade database from version 0.1 to version 0.2"
-    echo "      Add columns user/passwd to table 'vim_tenants'"
-    sql "ALTER TABLE vim_tenants ADD COLUMN user VARCHAR(36) NULL COMMENT 'Credentials for vim' AFTER created,
-       ADD COLUMN passwd VARCHAR(50) NULL COMMENT 'Credentials for vim' AFTER user;"
-    echo "      Add table 'images' and 'datacenters_images'"
-    sql "CREATE TABLE images (
-       uuid VARCHAR(36) NOT NULL,
-       name VARCHAR(50) NOT NULL,
-       location VARCHAR(200) NOT NULL,
-       description VARCHAR(100) NULL,
-       metadata VARCHAR(400) NULL,
-       PRIMARY KEY (uuid),
-       UNIQUE INDEX location (location)  )
-        COLLATE='utf8_general_ci'
-        ENGINE=InnoDB;"
-    sql "CREATE TABLE datacenters_images (
-       id INT NOT NULL AUTO_INCREMENT,
-       image_id VARCHAR(36) NOT NULL,
-       datacenter_id VARCHAR(36) NOT NULL,
-       vim_id VARCHAR(36) NOT NULL,
-       PRIMARY KEY (id),
-       CONSTRAINT FK__images FOREIGN KEY (image_id) REFERENCES images (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
-       CONSTRAINT FK__datacenters_i FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid) ON UPDATE CASCADE ON DELETE CASCADE  )
-        COLLATE='utf8_general_ci'
-        ENGINE=InnoDB;"
-    echo "      migrate data from table 'vms' into 'images'"
-    sql "INSERT INTO images (uuid, name, location) SELECT DISTINCT vim_image_id, vim_image_id, image_path FROM vms;"
-    sql "INSERT INTO datacenters_images (image_id, datacenter_id, vim_id)
-          SELECT DISTINCT vim_image_id, datacenters.uuid, vim_image_id FROM vms JOIN datacenters;"
-    echo "      Add table 'flavors' and 'datacenter_flavors'"
-    sql "CREATE TABLE flavors (
-       uuid VARCHAR(36) NOT NULL,
-       name VARCHAR(50) NOT NULL,
-       description VARCHAR(100) NULL,
-       disk SMALLINT(5) UNSIGNED NULL DEFAULT NULL,
-       ram SMALLINT(5) UNSIGNED NULL DEFAULT NULL,
-       vcpus SMALLINT(5) UNSIGNED NULL DEFAULT NULL,
-       extended VARCHAR(2000) NULL DEFAULT NULL COMMENT 'Extra description json format of needed resources and pining, orginized in sets per numa',
-       PRIMARY KEY (uuid)  )
-        COLLATE='utf8_general_ci'
-        ENGINE=InnoDB;"
-    sql "CREATE TABLE datacenters_flavors (
-       id INT NOT NULL AUTO_INCREMENT,
-       flavor_id VARCHAR(36) NOT NULL,
-       datacenter_id VARCHAR(36) NOT NULL,
-       vim_id VARCHAR(36) NOT NULL,
-       PRIMARY KEY (id),
-       CONSTRAINT FK__flavors FOREIGN KEY (flavor_id) REFERENCES flavors (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
-       CONSTRAINT FK__datacenters_f FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid) ON UPDATE CASCADE ON DELETE CASCADE  )
-        COLLATE='utf8_general_ci'
-        ENGINE=InnoDB;"
-    echo "      migrate data from table 'vms' into 'flavors'"
-    sql "INSERT INTO flavors (uuid, name) SELECT DISTINCT vim_flavor_id, vim_flavor_id FROM vms;"
-    sql "INSERT INTO datacenters_flavors (flavor_id, datacenter_id, vim_id)
-          SELECT DISTINCT vim_flavor_id, datacenters.uuid, vim_flavor_id FROM vms JOIN datacenters;"
-    sql "ALTER TABLE vms ALTER vim_flavor_id DROP DEFAULT, ALTER vim_image_id DROP DEFAULT;
-          ALTER TABLE vms CHANGE COLUMN vim_flavor_id flavor_id VARCHAR(36) NOT NULL COMMENT 'Link to flavor table' AFTER vnf_id,
-          CHANGE COLUMN vim_image_id image_id VARCHAR(36) NOT NULL COMMENT 'Link to image table' AFTER flavor_id, 
-          ADD CONSTRAINT FK_vms_images  FOREIGN KEY (image_id) REFERENCES  images (uuid),
-          ADD CONSTRAINT FK_vms_flavors FOREIGN KEY (flavor_id) REFERENCES flavors (uuid);"
-    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (2, '0.2', '0.2.5', 'new tables images,flavors', '2015-07-13');"
-
-}   
-     
-function downgrade_from_2(){
-    # echo "    downgrade database from version 0.2 to version 0.1"
-    echo "       migrate back data from 'datacenters_images' 'datacenters_flavors' into 'vms'"
-    sql "ALTER TABLE vms ALTER image_id DROP DEFAULT, ALTER flavor_id DROP DEFAULT;
-          ALTER TABLE vms CHANGE COLUMN flavor_id vim_flavor_id VARCHAR(36) NOT NULL COMMENT 'Flavor ID in the VIM DB' AFTER vnf_id,
-          CHANGE COLUMN image_id vim_image_id VARCHAR(36) NOT NULL COMMENT 'Image ID in the VIM DB' AFTER vim_flavor_id,
-          DROP FOREIGN KEY FK_vms_flavors, DROP INDEX FK_vms_flavors,
-          DROP FOREIGN KEY FK_vms_images, DROP INDEX FK_vms_images;"
-#    echo "UPDATE v SET v.vim_image_id=di.vim_id
-#          FROM  vms as v INNER JOIN images as i ON v.vim_image_id=i.uuid 
-#          INNER JOIN datacenters_images as di ON i.uuid=di.image_id;"
-    echo "      Delete columns 'user/passwd' from 'vim_tenants'"
-    sql "ALTER TABLE vim_tenants DROP COLUMN user, DROP COLUMN passwd; "
-    echo "        delete tables 'datacenter_images', 'images'"
-    sql "DROP TABLE IF EXISTS \`datacenters_images\`;"
-    sql "DROP TABLE IF EXISTS \`images\`;"
-    echo "        delete tables 'datacenter_flavors', 'flavors'"
-    sql "DROP TABLE IF EXISTS \`datacenters_flavors\`;"
-    sql "DROP TABLE IF EXISTS \`flavors\`;"
-    sql "DELETE FROM schema_version WHERE version_int='2';"
-}
-
-function upgrade_to_3(){
-    # echo "    upgrade database from version 0.2 to version 0.3"
-    echo "      Change table 'logs', 'uuids"
-    sql "ALTER TABLE logs CHANGE COLUMN related related VARCHAR(36) NOT NULL COMMENT 'Relevant element for the log' AFTER nfvo_tenant_id;"
-    sql "ALTER TABLE uuids CHANGE COLUMN used_at used_at VARCHAR(36) NULL DEFAULT NULL COMMENT 'Table that uses this UUID' AFTER created_at;"
-    echo "      Add column created to table 'datacenters_images' and 'datacenters_flavors'"
-    for table in datacenters_images datacenters_flavors
-    do
-        sql "ALTER TABLE $table ADD COLUMN created ENUM('true','false') NOT NULL DEFAULT 'false' 
-            COMMENT 'Indicates if it has been created by openmano, or already existed' AFTER vim_id;"
-    done
-    sql "ALTER TABLE images CHANGE COLUMN metadata metadata VARCHAR(2000) NULL DEFAULT NULL AFTER description;"
-    echo "      Allow null to column 'vim_interface_id' in 'instance_interfaces'"
-    sql "ALTER TABLE instance_interfaces CHANGE COLUMN vim_interface_id vim_interface_id VARCHAR(36) NULL DEFAULT NULL COMMENT 'vim identity for that interface' AFTER interface_id; "
-    echo "      Add column config to table 'datacenters'"
-    sql "ALTER TABLE datacenters ADD COLUMN config VARCHAR(4000) NULL DEFAULT NULL COMMENT 'extra config information in json' AFTER vim_url_admin;
-       "
-    echo "      Add column datacenter_id to table 'vim_tenants'"
-    sql "ALTER TABLE vim_tenants ADD COLUMN datacenter_id VARCHAR(36) NULL COMMENT 'Datacenter of this tenant' AFTER uuid,
-       DROP INDEX name, DROP INDEX vim_tenant_id;"
-    sql "ALTER TABLE vim_tenants CHANGE COLUMN name vim_tenant_name VARCHAR(36) NULL DEFAULT NULL COMMENT 'tenant name at VIM' AFTER datacenter_id,
-       CHANGE COLUMN vim_tenant_id vim_tenant_id VARCHAR(36) NULL DEFAULT NULL COMMENT 'Tenant ID at VIM' AFTER vim_tenant_name;"
-    echo "UPDATE vim_tenants as vt LEFT JOIN tenants_datacenters as td ON vt.uuid=td.vim_tenant_id
-       SET vt.datacenter_id=td.datacenter_id;"
-    sql "DELETE FROM vim_tenants WHERE datacenter_id is NULL;"
-    sql "ALTER TABLE vim_tenants ALTER datacenter_id DROP DEFAULT;
-       ALTER TABLE vim_tenants
-       CHANGE COLUMN datacenter_id datacenter_id VARCHAR(36) NOT NULL COMMENT 'Datacenter of this tenant' AFTER uuid;"
-    sql "ALTER TABLE vim_tenants ADD CONSTRAINT FK_vim_tenants_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid)
-       ON UPDATE CASCADE ON DELETE CASCADE;"
-
-    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (3, '0.3', '0.3.3', 'alter vim_tenant tables', '2015-07-28');"
-}
-
-
-function downgrade_from_3(){
-    # echo "    downgrade database from version 0.3 to version 0.2"
-    echo "      Change back table 'logs', 'uuids'"
-    sql "ALTER TABLE logs CHANGE COLUMN related related ENUM('nfvo_tenants','datacenters','vim_tenants','tenants_datacenters','vnfs','vms','interfaces','nets','scenarios','sce_vnfs','sce_interfaces','sce_nets','instance_scenarios','instance_vnfs','instance_vms','instance_nets','instance_interfaces') NOT NULL COMMENT 'Relevant element for the log' AFTER nfvo_tenant_id;"
-    sql "ALTER TABLE uuids CHANGE COLUMN used_at used_at ENUM('nfvo_tenants','datacenters','vim_tenants','vnfs','vms','interfaces','nets','scenarios','sce_vnfs','sce_interfaces','sce_nets','instance_scenarios','instance_vnfs','instance_vms','instance_nets','instance_interfaces') NULL DEFAULT NULL COMMENT 'Table that uses this UUID' AFTER created_at;"
-    echo "      Delete column created from table 'datacenters_images' and 'datacenters_flavors'"
-    for table in datacenters_images datacenters_flavors
-    do
-        sql "ALTER TABLE $table DROP COLUMN created;"
-    done
-    sql "ALTER TABLE images CHANGE COLUMN metadata metadata VARCHAR(400) NULL DEFAULT NULL AFTER description;"
-    echo "      Deny back null to column 'vim_interface_id' in 'instance_interfaces'"
-    sql "ALTER TABLE instance_interfaces CHANGE COLUMN vim_interface_id vim_interface_id VARCHAR(36) NOT NULL COMMENT 'vim identity for that interface' AFTER interface_id; "
-    echo "       Delete column config to table 'datacenters'"
-    sql "ALTER TABLE datacenters DROP COLUMN config;"
-    echo "       Delete column datacenter_id to table 'vim_tenants'"
-    sql "ALTER TABLE vim_tenants DROP COLUMN datacenter_id, DROP FOREIGN KEY FK_vim_tenants_datacenters;"
-    sql "ALTER TABLE vim_tenants CHANGE COLUMN vim_tenant_name name VARCHAR(36) NULL DEFAULT NULL COMMENT '' AFTER uuid"
-    sql "ALTER TABLE vim_tenants ALTER name DROP DEFAULT;"
-    sql "ALTER TABLE vim_tenants CHANGE COLUMN name name VARCHAR(36) NOT NULL AFTER uuid" || ! echo "Warning changing column name at vim_tenants!"
-    sql "ALTER TABLE vim_tenants ADD UNIQUE INDEX name (name);" || ! echo "Warning add unique index name at vim_tenants!"
-    sql "ALTER TABLE vim_tenants ALTER vim_tenant_id DROP DEFAULT;"
-    sql "ALTER TABLE vim_tenants CHANGE COLUMN vim_tenant_id vim_tenant_id VARCHAR(36) NOT NULL COMMENT 'Tenant ID in the VIM DB' AFTER name;" ||
-        ! echo "Warning changing column vim_tenant_id at vim_tenants!"
-    sql "ALTER TABLE vim_tenants ADD UNIQUE INDEX vim_tenant_id (vim_tenant_id);" ||
-        ! echo "Warning add unique index vim_tenant_id at vim_tenants!"
-    sql "DELETE FROM schema_version WHERE version_int='3';"
-}
-
-function upgrade_to_4(){
-    # echo "    upgrade database from version 0.3 to version 0.4"
-    echo "      Enlarge graph field at tables 'sce_vnfs', 'sce_nets'"
-    for table in sce_vnfs sce_nets
-    do
-        sql "ALTER TABLE $table CHANGE COLUMN graph graph VARCHAR(2000) NULL DEFAULT NULL AFTER modified_at;"
-    done
-    sql "ALTER TABLE datacenters CHANGE COLUMN type type VARCHAR(36) NOT NULL DEFAULT 'openvim' AFTER description;"
-    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (4, '0.4', '0.3.5', 'enlarge graph field at sce_vnfs/nets', '2015-10-20');"
-}
-
-function downgrade_from_4(){
-    # echo "    downgrade database from version 0.4 to version 0.3"
-    echo "      Shorten back graph field at tables 'sce_vnfs', 'sce_nets'"
-    for table in sce_vnfs sce_nets
-    do
-        sql "ALTER TABLE $table CHANGE COLUMN graph graph VARCHAR(2000) NULL DEFAULT NULL AFTER modified_at;"
-    done
-    sql "ALTER TABLE datacenters CHANGE COLUMN type type ENUM('openvim','openstack') NOT NULL DEFAULT 'openvim' AFTER description;"
-    sql "DELETE FROM schema_version WHERE version_int='4';"
-}
-
-function upgrade_to_5(){
-    # echo "    upgrade database from version 0.4 to version 0.5"
-    echo "      Add 'mac' field for bridge interfaces in table 'interfaces'"
-    sql "ALTER TABLE interfaces ADD COLUMN mac CHAR(18) NULL DEFAULT NULL AFTER model;"
-    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (5, '0.5', '0.4.1', 'Add mac address for bridge interfaces', '2015-12-14');"
-}
-function downgrade_from_5(){
-    # echo "    downgrade database from version 0.5 to version 0.4"
-    echo "      Remove 'mac' field for bridge interfaces in table 'interfaces'"
-    sql "ALTER TABLE interfaces DROP COLUMN mac;"
-    sql "DELETE FROM schema_version WHERE version_int='5';"
-}
-
-function upgrade_to_6(){
-    # echo "    upgrade database from version 0.5 to version 0.6"
-    echo "      Add 'descriptor' field text to 'vnfd', 'scenarios'"
-    sql "ALTER TABLE vnfs ADD COLUMN descriptor TEXT NULL DEFAULT NULL COMMENT 'Original text descriptor used for create the VNF' AFTER class;"
-    sql "ALTER TABLE scenarios ADD COLUMN descriptor TEXT NULL DEFAULT NULL COMMENT 'Original text descriptor used for create the scenario' AFTER modified_at;"
-    echo "      Add 'last_error', 'vim_info' to 'instance_vms', 'instance_nets'"
-    sql "ALTER TABLE instance_vms  ADD COLUMN error_msg VARCHAR(1024) NULL DEFAULT NULL AFTER status;"
-    sql "ALTER TABLE instance_vms  ADD COLUMN vim_info TEXT NULL DEFAULT NULL AFTER error_msg;"
-    sql "ALTER TABLE instance_vms  CHANGE COLUMN status status ENUM('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED') NOT NULL DEFAULT 'BUILD' AFTER vim_vm_id;"
-    sql "ALTER TABLE instance_nets ADD COLUMN error_msg VARCHAR(1024) NULL DEFAULT NULL AFTER status;"
-    sql "ALTER TABLE instance_nets ADD COLUMN vim_info TEXT NULL DEFAULT NULL AFTER error_msg;"
-    sql "ALTER TABLE instance_nets CHANGE COLUMN status status ENUM('ACTIVE','DOWN','BUILD','ERROR','VIM_ERROR','INACTIVE','DELETED') NOT NULL DEFAULT 'BUILD' AFTER instance_scenario_id;"
-    echo "      Add 'mac_address', 'ip_address', 'vim_info' to 'instance_interfaces'"
-    sql "ALTER TABLE instance_interfaces ADD COLUMN mac_address VARCHAR(32) NULL DEFAULT NULL AFTER vim_interface_id, ADD COLUMN ip_address VARCHAR(64) NULL DEFAULT NULL AFTER mac_address, ADD COLUMN vim_info TEXT NULL DEFAULT NULL AFTER ip_address;"
-    echo "      Add 'sce_vnf_id','datacenter_id','vim_tenant_id' field to 'instance_vnfs'"
-    sql "ALTER TABLE instance_vnfs ADD COLUMN sce_vnf_id VARCHAR(36) NULL DEFAULT NULL AFTER vnf_id, ADD CONSTRAINT FK_instance_vnfs_sce_vnfs FOREIGN KEY (sce_vnf_id) REFERENCES sce_vnfs (uuid) ON UPDATE CASCADE ON DELETE SET NULL;"
-    sql "ALTER TABLE instance_vnfs ADD COLUMN vim_tenant_id VARCHAR(36) NULL DEFAULT NULL AFTER sce_vnf_id, ADD CONSTRAINT FK_instance_vnfs_vim_tenants FOREIGN KEY (vim_tenant_id) REFERENCES vim_tenants (uuid) ON UPDATE RESTRICT ON DELETE RESTRICT;"
-    sql "ALTER TABLE instance_vnfs ADD COLUMN datacenter_id VARCHAR(36) NULL DEFAULT NULL AFTER vim_tenant_id, ADD CONSTRAINT FK_instance_vnfs_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid) ON UPDATE RESTRICT ON DELETE RESTRICT;"
-    echo "      Add 'sce_net_id','net_id','datacenter_id','vim_tenant_id' field to 'instance_nets'"
-    sql "ALTER TABLE instance_nets ADD COLUMN sce_net_id VARCHAR(36) NULL DEFAULT NULL AFTER instance_scenario_id, ADD CONSTRAINT FK_instance_nets_sce_nets FOREIGN KEY (sce_net_id) REFERENCES sce_nets (uuid) ON UPDATE CASCADE ON DELETE SET NULL;"
-    sql "ALTER TABLE instance_nets ADD COLUMN net_id VARCHAR(36) NULL DEFAULT NULL AFTER sce_net_id, ADD CONSTRAINT FK_instance_nets_nets FOREIGN KEY (net_id) REFERENCES nets (uuid) ON UPDATE CASCADE ON DELETE SET NULL;"
-    sql "ALTER TABLE instance_nets ADD COLUMN vim_tenant_id VARCHAR(36) NULL DEFAULT NULL AFTER net_id, ADD CONSTRAINT FK_instance_nets_vim_tenants FOREIGN KEY (vim_tenant_id) REFERENCES vim_tenants (uuid) ON UPDATE RESTRICT ON DELETE RESTRICT;"
-    sql "ALTER TABLE instance_nets ADD COLUMN datacenter_id VARCHAR(36) NULL DEFAULT NULL AFTER vim_tenant_id, ADD CONSTRAINT FK_instance_nets_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid) ON UPDATE RESTRICT ON DELETE RESTRICT;"
-    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (6, '0.6', '0.4.2', 'Adding VIM status info', '2015-12-22');"
-}
-function downgrade_from_6(){
-    # echo "    downgrade database from version 0.6 to version 0.5"
-    echo "      Remove 'descriptor' field from 'vnfd', 'scenarios' tables"
-    sql "ALTER TABLE vnfs      DROP COLUMN descriptor;"
-    sql "ALTER TABLE scenarios DROP COLUMN descriptor;"
-    echo "      Remove 'last_error', 'vim_info' from 'instance_vms', 'instance_nets'"
-    sql "ALTER TABLE instance_vms  DROP COLUMN error_msg, DROP COLUMN vim_info;"
-    sql "ALTER TABLE instance_vms  CHANGE COLUMN status status ENUM('ACTIVE','PAUSED','INACTIVE','CREATING','ERROR','DELETING') NOT NULL DEFAULT 'CREATING' AFTER vim_vm_id;"
-    sql "ALTER TABLE instance_nets DROP COLUMN error_msg, DROP COLUMN vim_info;"
-    sql "ALTER TABLE instance_nets CHANGE COLUMN status status ENUM('ACTIVE','DOWN','BUILD','ERROR') NOT NULL DEFAULT 'BUILD' AFTER instance_scenario_id;"
-    echo "      Remove 'mac_address', 'ip_address', 'vim_info' from 'instance_interfaces'"
-    sql "ALTER TABLE instance_interfaces DROP COLUMN mac_address, DROP COLUMN ip_address, DROP COLUMN vim_info;"
-    echo "      Remove 'sce_vnf_id','datacenter_id','vim_tenant_id' field from 'instance_vnfs'"
-    sql "ALTER TABLE instance_vnfs DROP COLUMN sce_vnf_id, DROP FOREIGN KEY FK_instance_vnfs_sce_vnfs;"
-    sql "ALTER TABLE instance_vnfs DROP COLUMN vim_tenant_id, DROP FOREIGN KEY FK_instance_vnfs_vim_tenants;"
-    sql "ALTER TABLE instance_vnfs DROP COLUMN datacenter_id, DROP FOREIGN KEY FK_instance_vnfs_datacenters;"
-    echo "      Remove 'sce_net_id','net_id','datacenter_id','vim_tenant_id' field from 'instance_nets'"
-    sql "ALTER TABLE instance_nets DROP COLUMN sce_net_id, DROP FOREIGN KEY FK_instance_nets_sce_nets;"
-    sql "ALTER TABLE instance_nets DROP COLUMN net_id, DROP FOREIGN KEY FK_instance_nets_nets;"
-    sql "ALTER TABLE instance_nets DROP COLUMN vim_tenant_id, DROP FOREIGN KEY FK_instance_nets_vim_tenants;"
-    sql "ALTER TABLE instance_nets DROP COLUMN datacenter_id, DROP FOREIGN KEY FK_instance_nets_datacenters;"
-    sql "DELETE FROM schema_version WHERE version_int='6';"
-}
-
-function upgrade_to_7(){
-    # echo "    upgrade database from version 0.6 to version 0.7"
-    echo "      Change created_at, modified_at from timestamp to unix float at all database"
-    for table in datacenters datacenter_nets instance_nets instance_scenarios instance_vms instance_vnfs interfaces nets nfvo_tenants scenarios sce_interfaces sce_nets sce_vnfs tenants_datacenters vim_tenants vms vnfs uuids
-    do
-         echo -en "        $table               \r"
-         sql "ALTER TABLE $table ADD COLUMN created_at_ DOUBLE NOT NULL after created_at;"
-         echo "UPDATE $table SET created_at_=unix_timestamp(created_at);"
-         sql "ALTER TABLE $table DROP COLUMN created_at, CHANGE COLUMN created_at_ created_at DOUBLE NOT NULL;"
-         [[ $table == uuids ]] || sql "ALTER TABLE $table CHANGE COLUMN modified_at modified_at DOUBLE NULL DEFAULT NULL;"
-    done
-    
-    echo "      Add 'descriptor' field text to 'vnfd', 'scenarios'"
-    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (7, '0.7', '0.4.3', 'Changing created_at time at database', '2016-01-25');"
-}
-function downgrade_from_7(){
-    # echo "    downgrade database from version 0.7 to version 0.6"
-    echo "      Change back created_at, modified_at from unix float to timestamp at all database"
-    for table in datacenters datacenter_nets instance_nets instance_scenarios instance_vms instance_vnfs interfaces nets nfvo_tenants scenarios sce_interfaces sce_nets sce_vnfs tenants_datacenters vim_tenants vms vnfs uuids
-    do
-         echo -en "        $table               \r"
-         sql "ALTER TABLE $table ADD COLUMN created_at_ TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP after created_at;"
-         echo "UPDATE $table SET created_at_=from_unixtime(created_at);"
-         sql "ALTER TABLE $table DROP COLUMN created_at, CHANGE COLUMN created_at_ created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP;"
-         [[ $table == uuids ]] || sql "ALTER TABLE $table CHANGE COLUMN modified_at modified_at TIMESTAMP NULL DEFAULT NULL;"
-    done
-    echo "      Remove 'descriptor' field from 'vnfd', 'scenarios' tables"
-    sql "DELETE FROM schema_version WHERE version_int='7';"
-}
-
-function upgrade_to_8(){
-    # echo "    upgrade database from version 0.7 to version 0.8"
-    echo "      Change enalarge name, description to 255 at all database"
-    for table in datacenters datacenter_nets flavors images instance_scenarios nets nfvo_tenants scenarios sce_nets sce_vnfs vms vnfs
-    do
-         echo -en "        $table               \r"
-         sql "ALTER TABLE $table CHANGE COLUMN name name VARCHAR(255) NOT NULL;"
-         sql "ALTER TABLE $table CHANGE COLUMN description description VARCHAR(255) NULL DEFAULT NULL;"
-    done
-    echo -en "        interfaces           \r"
-    sql "ALTER TABLE interfaces CHANGE COLUMN internal_name internal_name VARCHAR(255) NOT NULL, CHANGE COLUMN external_name external_name VARCHAR(255) NULL DEFAULT NULL;"
-    sql "ALTER TABLE vim_tenants CHANGE COLUMN vim_tenant_name vim_tenant_name VARCHAR(64) NULL DEFAULT NULL;"
-    echo -en "        vim_tenants          \r"
-    sql "ALTER TABLE vim_tenants CHANGE COLUMN user user VARCHAR(64) NULL DEFAULT NULL, CHANGE COLUMN passwd passwd VARCHAR(64) NULL DEFAULT NULL;"
-    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (8, '0.8', '0.4.32', 'Enlarging name at database', '2016-02-01');"
-}
-function downgrade_from_8(){
-    # echo "    downgrade database from version 0.8 to version 0.7"
-    echo "      Change back name,description to shorter length at all database"
-    for table in datacenters datacenter_nets flavors images instance_scenarios nets nfvo_tenants scenarios sce_nets sce_vnfs vms vnfs
-    do
-         name_length=50
-         [[ $table == flavors ]] || [[ $table == images ]] || name_length=36 
-         echo -en "        $table               \r"
-         sql "ALTER TABLE $table CHANGE COLUMN name name VARCHAR($name_length) NOT NULL;"
-         sql "ALTER TABLE $table CHANGE COLUMN description description VARCHAR(100) NULL DEFAULT NULL;"
-    done
-    echo -en "        interfaces           \r"
-    sql "ALTER TABLE interfaces CHANGE COLUMN internal_name internal_name VARCHAR(25) NOT NULL, CHANGE COLUMN external_name external_name VARCHAR(25) NULL DEFAULT NULL;"
-    echo -en "        vim_tenants          \r"
-    sql "ALTER TABLE vim_tenants CHANGE COLUMN vim_tenant_name vim_tenant_name VARCHAR(36) NULL DEFAULT NULL;"
-    sql "ALTER TABLE vim_tenants CHANGE COLUMN user user VARCHAR(36) NULL DEFAULT NULL, CHANGE COLUMN passwd passwd VARCHAR(50) NULL DEFAULT NULL;"
-    sql "DELETE FROM schema_version WHERE version_int='8';"
-}
-function upgrade_to_9(){
-    # echo "    upgrade database from version 0.8 to version 0.9"
-    echo "      Add more status to 'instance_vms'"
-    sql "ALTER TABLE instance_vms CHANGE COLUMN status status ENUM('ACTIVE:NoMgmtIP','ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED') NOT NULL DEFAULT 'BUILD';"
-    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (9, '0.9', '0.4.33', 'Add ACTIVE:NoMgmtIP to instance_vms table', '2016-02-05');"
-}
-function downgrade_from_9(){
-    # echo "    downgrade database from version 0.9 to version 0.8"
-    echo "      Add more status to 'instance_vms'"
-    sql "ALTER TABLE instance_vms CHANGE COLUMN status status ENUM('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED') NOT NULL DEFAULT 'BUILD';"
-    sql "DELETE FROM schema_version WHERE version_int='9';"
-}
-function upgrade_to_10(){
-    # echo "    upgrade database from version 0.9 to version 0.10"
-    echo "      add tenant to 'vnfs'"
-    sql "ALTER TABLE vnfs ADD COLUMN tenant_id VARCHAR(36) NULL DEFAULT NULL AFTER name, ADD CONSTRAINT FK_vnfs_nfvo_tenants FOREIGN KEY (tenant_id) REFERENCES nfvo_tenants (uuid) ON UPDATE CASCADE ON DELETE SET NULL, CHANGE COLUMN public public ENUM('true','false') NOT NULL DEFAULT 'false' AFTER physical, DROP INDEX name, DROP INDEX path, DROP COLUMN path;"
-    sql "ALTER TABLE scenarios DROP FOREIGN KEY FK_scenarios_nfvo_tenants;"
-    sql "ALTER TABLE scenarios CHANGE COLUMN nfvo_tenant_id tenant_id VARCHAR(36) NULL DEFAULT NULL after name, ADD CONSTRAINT FK_scenarios_nfvo_tenants FOREIGN KEY (tenant_id) REFERENCES nfvo_tenants (uuid);"
-    sql "ALTER TABLE instance_scenarios DROP FOREIGN KEY FK_instance_scenarios_nfvo_tenants;"
-    sql "ALTER TABLE instance_scenarios CHANGE COLUMN nfvo_tenant_id tenant_id VARCHAR(36) NULL DEFAULT NULL after name, ADD CONSTRAINT FK_instance_scenarios_nfvo_tenants FOREIGN KEY (tenant_id) REFERENCES nfvo_tenants (uuid);"
-    echo "      rename 'vim_tenants' table to 'datacenter_tenants'"
-    echo "RENAME TABLE vim_tenants TO datacenter_tenants;"
-    for table in tenants_datacenters instance_scenarios instance_vnfs instance_nets
-    do
-        NULL="NOT NULL"
-        [[ $table == instance_vnfs ]] && NULL="NULL DEFAULT NULL"
-        sql "ALTER TABLE ${table} DROP FOREIGN KEY FK_${table}_vim_tenants;"
-        sql "ALTER TABLE ${table} ALTER vim_tenant_id DROP DEFAULT;"
-        sql "ALTER TABLE ${table} CHANGE COLUMN vim_tenant_id datacenter_tenant_id VARCHAR(36)  ${NULL} AFTER datacenter_id, ADD CONSTRAINT FK_${table}_datacenter_tenants FOREIGN KEY (datacenter_tenant_id) REFERENCES datacenter_tenants (uuid); "
-    done    
-    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (10, '0.10', '0.4.36', 'tenant management of vnfs,scenarios', '2016-03-08');"
-}
-
-function downgrade_from_10(){
-    # echo "    downgrade database from version 0.10 to version 0.9"
-    echo "      remove tenant from 'vnfs'"
-    sql "ALTER TABLE vnfs DROP COLUMN tenant_id, DROP FOREIGN KEY FK_vnfs_nfvo_tenants, ADD UNIQUE INDEX name (name), ADD COLUMN path VARCHAR(100) NULL DEFAULT NULL COMMENT 'Path where the YAML descriptor of the VNF can be found. NULL if it is a physical network function.' AFTER name, ADD UNIQUE INDEX path (path), CHANGE COLUMN public public ENUM('true','false') NOT NULL DEFAULT 'true' AFTER physical;"
-    sql "ALTER TABLE scenarios DROP FOREIGN KEY FK_scenarios_nfvo_tenants;"
-    sql "ALTER TABLE scenarios CHANGE COLUMN tenant_id nfvo_tenant_id VARCHAR(36) NULL DEFAULT NULL after name, ADD CONSTRAINT FK_scenarios_nfvo_tenants FOREIGN KEY (nfvo_tenant_id) REFERENCES nfvo_tenants (uuid);"
-    sql "ALTER TABLE instance_scenarios DROP FOREIGN KEY FK_instance_scenarios_nfvo_tenants;"
-    sql "ALTER TABLE instance_scenarios CHANGE COLUMN tenant_id nfvo_tenant_id VARCHAR(36) NULL DEFAULT NULL after name, ADD CONSTRAINT FK_instance_scenarios_nfvo_tenants FOREIGN KEY (nfvo_tenant_id) REFERENCES nfvo_tenants (uuid);"
-    echo "      rename back 'datacenter_tenants' table to 'vim_tenants'"
-    echo "RENAME TABLE datacenter_tenants TO vim_tenants;"
-    for table in tenants_datacenters instance_scenarios instance_vnfs instance_nets
-    do
-        sql "ALTER TABLE ${table} DROP FOREIGN KEY FK_${table}_datacenter_tenants;"
-        NULL="NOT NULL"
-        [[ $table == instance_vnfs ]] && NULL="NULL DEFAULT NULL"
-        sql "ALTER TABLE ${table} ALTER datacenter_tenant_id DROP DEFAULT;"
-        sql "ALTER TABLE ${table} CHANGE COLUMN datacenter_tenant_id vim_tenant_id VARCHAR(36) $NULL AFTER datacenter_id, ADD CONSTRAINT FK_${table}_vim_tenants FOREIGN KEY (vim_tenant_id) REFERENCES vim_tenants (uuid); "
-    done    
-    sql "DELETE FROM schema_version WHERE version_int='10';"
-}
-
-function upgrade_to_11(){
-    # echo "    upgrade database from version 0.10 to version 0.11"
-    echo "      remove unique name at 'scenarios', 'instance_scenarios'"
-    sql "ALTER TABLE scenarios DROP INDEX name;"
-    sql "ALTER TABLE instance_scenarios DROP INDEX name;"
-    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (11, '0.11', '0.4.43', 'remove unique name at scenarios,instance_scenarios', '2016-07-18');"
-}
-function downgrade_from_11(){
-    # echo "    downgrade database from version 0.11 to version 0.10"
-    echo "      add unique name at 'scenarios', 'instance_scenarios'"
-    sql "ALTER TABLE scenarios ADD UNIQUE INDEX name (name);"
-    sql "ALTER TABLE instance_scenarios ADD UNIQUE INDEX name (name);"
-    sql "DELETE FROM schema_version WHERE version_int='11';"
-}
-
-function upgrade_to_12(){
-    # echo "    upgrade database from version 0.11 to version 0.12"
-    echo "      create ip_profiles table, with foreign keys to all nets tables, and add ip_address column to 'interfaces' and 'sce_interfaces'"
-    sql "CREATE TABLE IF NOT EXISTS ip_profiles (
-       id INT(11) NOT NULL AUTO_INCREMENT,
-       net_id VARCHAR(36) NULL DEFAULT NULL,
-       sce_net_id VARCHAR(36) NULL DEFAULT NULL,
-       instance_net_id VARCHAR(36) NULL DEFAULT NULL,
-       ip_version ENUM('IPv4','IPv6') NOT NULL DEFAULT 'IPv4',
-       subnet_address VARCHAR(64) NULL DEFAULT NULL,
-       gateway_address VARCHAR(64) NULL DEFAULT NULL,
-       dns_address VARCHAR(64) NULL DEFAULT NULL,
-       dhcp_enabled ENUM('true','false') NOT NULL DEFAULT 'true',
-       dhcp_start_address VARCHAR(64) NULL DEFAULT NULL,
-       dhcp_count INT(11) NULL DEFAULT NULL,
-       PRIMARY KEY (id),
-       CONSTRAINT FK_ipprofiles_nets FOREIGN KEY (net_id) REFERENCES nets (uuid) ON DELETE CASCADE,
-       CONSTRAINT FK_ipprofiles_scenets FOREIGN KEY (sce_net_id) REFERENCES sce_nets (uuid) ON DELETE CASCADE,
-       CONSTRAINT FK_ipprofiles_instancenets FOREIGN KEY (instance_net_id) REFERENCES instance_nets (uuid) ON DELETE CASCADE  )
-        COMMENT='Table containing the IP parameters of a network, either a net, a sce_net or and instance_net.'
-        COLLATE='utf8_general_ci'
-        ENGINE=InnoDB;"
-    sql "ALTER TABLE interfaces ADD COLUMN ip_address VARCHAR(64) NULL DEFAULT NULL AFTER mac;"
-    sql "ALTER TABLE sce_interfaces ADD COLUMN ip_address VARCHAR(64) NULL DEFAULT NULL AFTER interface_id;"
-    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (12, '0.12', '0.4.46', 'create ip_profiles table, with foreign keys to all nets tables, and add ip_address column to interfaces and sce_interfaces', '2016-08-29');"
-}
-function downgrade_from_12(){
-    # echo "    downgrade database from version 0.12 to version 0.11"
-    echo "      delete ip_profiles table, and remove ip_address column in 'interfaces' and 'sce_interfaces'"
-    sql "DROP TABLE IF EXISTS ip_profiles;"
-    sql "ALTER TABLE interfaces DROP COLUMN ip_address;"
-    sql "ALTER TABLE sce_interfaces DROP COLUMN ip_address;"
-    sql "DELETE FROM schema_version WHERE version_int='12';"
-}
-
-function upgrade_to_13(){
-    # echo "    upgrade database from version 0.12 to version 0.13"
-    echo "      add cloud_config at 'scenarios', 'instance_scenarios'"
-    sql "ALTER TABLE scenarios ADD COLUMN cloud_config MEDIUMTEXT NULL DEFAULT NULL AFTER descriptor;"
-    sql "ALTER TABLE instance_scenarios ADD COLUMN cloud_config MEDIUMTEXT NULL DEFAULT NULL AFTER modified_at;"
-    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (13, '0.13', '0.4.47', 'insert cloud-config at scenarios,instance_scenarios', '2016-08-30');"
-}
-function downgrade_from_13(){
-    # echo "    downgrade database from version 0.13 to version 0.12"
-    echo "      remove cloud_config at 'scenarios', 'instance_scenarios'"
-    sql "ALTER TABLE scenarios DROP COLUMN cloud_config;"
-    sql "ALTER TABLE instance_scenarios DROP COLUMN cloud_config;"
-    sql "DELETE FROM schema_version WHERE version_int='13';"
-}
-
-function upgrade_to_14(){
-    # echo "    upgrade database from version 0.13 to version 0.14"
-    echo "      remove unique index vim_net_id, instance_scenario_id at table 'instance_nets'"
-    sql "ALTER TABLE instance_nets DROP INDEX vim_net_id_instance_scenario_id;"
-    sql "ALTER TABLE instance_nets CHANGE COLUMN external created ENUM('true','false') NOT NULL DEFAULT 'false' COMMENT 'Created or already exists at VIM' AFTER multipoint;"
-    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (14, '0.14', '0.4.57', 'remove unique index vim_net_id, instance_scenario_id', '2016-09-26');"
-}
-function downgrade_from_14(){
-    # echo "    downgrade database from version 0.14 to version 0.13"
-    echo "      remove cloud_config at 'scenarios', 'instance_scenarios'"
-    sql "ALTER TABLE instance_nets ADD UNIQUE INDEX vim_net_id_instance_scenario_id (vim_net_id, instance_scenario_id);"
-    sql "ALTER TABLE instance_nets CHANGE COLUMN created external ENUM('true','false') NOT NULL DEFAULT 'false' COMMENT 'If external, means that it already exists at VIM' AFTER multipoint;"
-    sql "DELETE FROM schema_version WHERE version_int='14';"
-}
-
-function upgrade_to_15(){
-    # echo "    upgrade database from version 0.14 to version 0.15"
-    echo "      add columns 'universal_name' and 'checksum' at table 'images', add unique index universal_name_checksum, and change location to allow NULL; change column 'image_path' in table 'vms' to allow NULL"
-    sql "ALTER TABLE images ADD COLUMN checksum VARCHAR(32) NULL DEFAULT NULL AFTER name;"
-    sql "ALTER TABLE images ALTER location DROP DEFAULT;"
-    sql "ALTER TABLE images ADD COLUMN universal_name VARCHAR(255) NULL AFTER name, CHANGE COLUMN location location VARCHAR(200) NULL AFTER checksum, ADD UNIQUE INDEX universal_name_checksum (universal_name, checksum);"
-    sql "ALTER TABLE vms ALTER image_path DROP DEFAULT;"
-    sql "ALTER TABLE vms CHANGE COLUMN image_path image_path VARCHAR(100) NULL COMMENT 'Path where the image of the VM is located' AFTER image_id;"
-    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (15, '0.15', '0.4.59', 'add columns universal_name and checksum at table images, add unique index universal_name_checksum, and change location to allow NULL; change column image_path in table vms to allow NULL', '2016-09-27');"
-}
-function downgrade_from_15(){
-    # echo "    downgrade database from version 0.15 to version 0.14"
-    echo "      remove columns 'universal_name' and 'checksum' from table 'images', remove index universal_name_checksum, change location NOT NULL; change column 'image_path' in table 'vms' to NOT NULL"
-    sql "ALTER TABLE images DROP INDEX universal_name_checksum;"
-    sql "ALTER TABLE images ALTER location DROP DEFAULT;"
-    sql "ALTER TABLE images CHANGE COLUMN location location VARCHAR(200) NOT NULL AFTER checksum;"
-    sql "ALTER TABLE images DROP COLUMN universal_name;"
-    sql "ALTER TABLE images DROP COLUMN checksum;"
-    sql "ALTER TABLE vms ALTER image_path DROP DEFAULT;"
-    sql "ALTER TABLE vms CHANGE COLUMN image_path image_path VARCHAR(100) NOT NULL COMMENT 'Path where the image of the VM is located' AFTER image_id;"
-    sql "DELETE FROM schema_version WHERE version_int='15';"
-}
-
-function upgrade_to_16(){
-    # echo "    upgrade database from version 0.15 to version 0.16"
-    echo "      add column 'config' at table 'datacenter_tenants', enlarge 'vim_tenant_name/id'"
-    sql "ALTER TABLE datacenter_tenants ADD COLUMN config VARCHAR(4000) NULL DEFAULT NULL AFTER passwd;"
-    sql "ALTER TABLE datacenter_tenants CHANGE COLUMN vim_tenant_name vim_tenant_name VARCHAR(256) NULL DEFAULT NULL AFTER datacenter_id;"
-    sql "ALTER TABLE datacenter_tenants CHANGE COLUMN vim_tenant_id vim_tenant_id VARCHAR(256) NULL DEFAULT NULL COMMENT 'Tenant ID at VIM' AFTER vim_tenant_name;"
-    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (16, '0.16', '0.5.2', 'enlarge vim_tenant_name and id. New config at datacenter_tenants', '2016-10-11');"
-}
-function downgrade_from_16(){
-    # echo "    downgrade database from version 0.16 to version 0.15"
-    echo "      remove column 'config' at table 'datacenter_tenants', restoring lenght 'vim_tenant_name/id'"
-    sql "ALTER TABLE datacenter_tenants DROP COLUMN config;"
-    sql "ALTER TABLE datacenter_tenants CHANGE COLUMN vim_tenant_name vim_tenant_name VARCHAR(64) NULL DEFAULT NULL AFTER datacenter_id;"
-    sql "ALTER TABLE datacenter_tenants CHANGE COLUMN vim_tenant_id vim_tenant_id VARCHAR(36) NULL DEFAULT NULL COMMENT 'Tenant ID at VIM' AFTER vim_tenant_name;"
-    sql "DELETE FROM schema_version WHERE version_int='16';"
-}
-
-function upgrade_to_17(){
-    # echo "    upgrade database from version 0.16 to version 0.17"
-    echo "      add column 'extended' at table 'datacenter_flavors'"
-    sql "ALTER TABLE datacenters_flavors ADD extended varchar(2000) NULL COMMENT 'Extra description json format of additional devices';"
-    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (17, '0.17', '0.5.3', 'Extra description json format of additional devices in datacenter_flavors', '2016-12-20');"
-}
-function downgrade_from_17(){
-    # echo "    downgrade database from version 0.17 to version 0.16"
-    echo "      remove column 'extended' from table 'datacenter_flavors'"
-    sql "ALTER TABLE datacenters_flavors DROP COLUMN extended;"
-    sql "DELETE FROM schema_version WHERE version_int='17';"
-}
-
-function upgrade_to_18(){
-    # echo "    upgrade database from version 0.17 to version 0.18"
-    echo "      add columns 'floating_ip' and 'port_security' at tables 'interfaces' and 'instance_interfaces'"
-    sql "ALTER TABLE interfaces ADD floating_ip BOOL DEFAULT 0 NOT NULL COMMENT 'Indicates if a floating_ip must be associated to this interface';"
-    sql "ALTER TABLE interfaces ADD port_security BOOL DEFAULT 1 NOT NULL COMMENT 'Indicates if port security must be enabled or disabled. By default it is enabled';"
-    sql "ALTER TABLE instance_interfaces ADD floating_ip BOOL DEFAULT 0 NOT NULL COMMENT 'Indicates if a floating_ip must be associated to this interface';"
-    sql "ALTER TABLE instance_interfaces ADD port_security BOOL DEFAULT 1 NOT NULL COMMENT 'Indicates if port security must be enabled or disabled. By default it is enabled';"
-    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (18, '0.18', '0.5.4', 'Add columns \'floating_ip\' and \'port_security\' at tables \'interfaces\' and \'instance_interfaces\'', '2017-01-09');"
-}
-function downgrade_from_18(){
-    # echo "    downgrade database from version 0.18 to version 0.17"
-    echo "      remove columns 'floating_ip' and 'port_security' from tables 'interfaces' and 'instance_interfaces'"
-    sql "ALTER TABLE interfaces DROP COLUMN floating_ip;"
-    sql "ALTER TABLE interfaces DROP COLUMN port_security;"
-    sql "ALTER TABLE instance_interfaces DROP COLUMN floating_ip;"
-    sql "ALTER TABLE instance_interfaces DROP COLUMN port_security;"
-    sql "DELETE FROM schema_version WHERE version_int='18';"
-}
-
-function upgrade_to_19(){
-    # echo "    upgrade database from version 0.18 to version 0.19"
-    echo "      add column 'boot_data' at table 'vms'"
-    sql "ALTER TABLE vms ADD COLUMN boot_data TEXT NULL DEFAULT NULL AFTER image_path;"
-    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (19, '0.19', '0.5.5', 'Extra Boot-data content at VNFC (vms)', '2017-01-11');"
-}
-function downgrade_from_19(){
-    # echo "    downgrade database from version 0.19 to version 0.18"
-    echo "      remove column 'boot_data' from table 'vms'"
-    sql "ALTER TABLE vms DROP COLUMN boot_data;"
-    sql "DELETE FROM schema_version WHERE version_int='19';"
-}
-
-function upgrade_to_20(){
-    # echo "    upgrade database from version 0.19 to version 0.20"
-    echo "      add column 'sdn_net_id' at table 'instance_nets' and columns 'sdn_port_id', 'compute_node', 'pci' and 'vlan' to table 'instance_interfaces'"
-    sql "ALTER TABLE instance_nets ADD sdn_net_id varchar(36) DEFAULT NULL NULL COMMENT 'Network id in ovim';"
-    sql "ALTER TABLE instance_interfaces ADD sdn_port_id varchar(36) DEFAULT NULL NULL COMMENT 'Port id in ovim';"
-    sql "ALTER TABLE instance_interfaces ADD compute_node varchar(100) DEFAULT NULL NULL COMMENT 'Compute node id used to specify the SDN port mapping';"
-    sql "ALTER TABLE instance_interfaces ADD pci varchar(12) DEFAULT NULL NULL COMMENT 'PCI of the physical port in the host';"
-    sql "ALTER TABLE instance_interfaces ADD vlan SMALLINT UNSIGNED DEFAULT NULL NULL COMMENT 'VLAN tag used by the port';"
-    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (20, '0.20', '0.5.9', 'Added columns to store dataplane connectivity info', '2017-03-13');"
-}
-function downgrade_from_20(){
-    # echo "    downgrade database from version 0.20 to version 0.19"
-    echo "      remove column 'sdn_net_id' at table 'instance_nets' and columns 'sdn_port_id', 'compute_node', 'pci' and 'vlan' to table 'instance_interfaces'"
-    sql "ALTER TABLE instance_nets DROP COLUMN sdn_net_id;"
-    sql "ALTER TABLE instance_interfaces DROP COLUMN vlan;"
-    sql "ALTER TABLE instance_interfaces DROP COLUMN pci;"
-    sql "ALTER TABLE instance_interfaces DROP COLUMN compute_node;"
-    sql "ALTER TABLE instance_interfaces DROP COLUMN sdn_port_id;"
-    sql "DELETE FROM schema_version WHERE version_int='20';"
-}
-
-function upgrade_to_21(){
-    # echo "    upgrade database from version 0.20 to version 0.21"
-    echo "      edit 'instance_nets' to allow instance_scenario_id=None"
-    sql "ALTER TABLE instance_nets MODIFY COLUMN instance_scenario_id varchar(36) NULL;"
-    echo "      enlarge column 'dns_address' at table 'ip_profiles'"
-    sql "ALTER TABLE ip_profiles MODIFY dns_address varchar(255) DEFAULT NULL NULL "\
-         "comment 'dns ip list separated by semicolon';"
-    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (21, '0.21', '0.5.15', 'Edit instance_nets to allow instance_scenario_id=None and enlarge column dns_address at table ip_profiles', '2017-06-02');"
-}
-function downgrade_from_21(){
-    # echo "    downgrade database from version 0.21 to version 0.20"
-    echo "      edit 'instance_nets' to disallow instance_scenario_id=None"
-    #Delete all lines with a instance_scenario_id=NULL in order to disable this option
-    sql "DELETE FROM instance_nets WHERE instance_scenario_id IS NULL;"
-    sql "ALTER TABLE instance_nets MODIFY COLUMN instance_scenario_id varchar(36) NOT NULL;"
-    echo "      shorten column 'dns_address' at table 'ip_profiles'"
-    sql "ALTER TABLE ip_profiles MODIFY dns_address varchar(64) DEFAULT NULL NULL;"
-    sql "DELETE FROM schema_version WHERE version_int='21';"
-}
-
-function upgrade_to_22(){
-    # echo "    upgrade database from version 0.21 to version 0.22"
-    echo "      Changed type of ram in 'flavors' from SMALLINT to MEDIUMINT"
-    sql "ALTER TABLE flavors CHANGE COLUMN ram ram MEDIUMINT(7) UNSIGNED NULL DEFAULT NULL AFTER disk;"
-    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (22, '0.22', '0.5.16', 'Changed type of ram in flavors from SMALLINT to MEDIUMINT', '2017-06-02');"
-}
-function downgrade_from_22(){
-    # echo "    downgrade database from version 0.22 to version 0.21"
-    echo "      Changed type of ram in 'flavors' from MEDIUMINT to SMALLINT"
-    sql "ALTER TABLE flavors CHANGE COLUMN ram ram SMALLINT(5) UNSIGNED NULL DEFAULT NULL AFTER disk;"
-    sql "DELETE FROM schema_version WHERE version_int='22';"
-}
-
-function upgrade_to_23(){
-    # echo "    upgrade database from version 0.22 to version 0.23"
-    echo "      add column 'availability_zone' at table 'vms'"
-    sql "ALTER TABLE vms ADD COLUMN availability_zone VARCHAR(255) NULL AFTER modified_at;"
-    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (23, '0.23', '0.5.20',"\
-        "'Changed type of ram in flavors from SMALLINT to MEDIUMINT', '2017-08-29');"
-}
-function downgrade_from_23(){
-    # echo "    downgrade database from version 0.23 to version 0.22"
-    echo "      remove column 'availability_zone' from table 'vms'"
-    sql "ALTER TABLE vms DROP COLUMN availability_zone;"
-    sql "DELETE FROM schema_version WHERE version_int='23';"
-}
-
-function upgrade_to_24(){
-    # echo "    upgrade database from version 0.23 to version 0.24"
-    echo "      Add 'count' to table 'vms'"
-
-    sql "ALTER TABLE vms ADD COLUMN count SMALLINT NOT NULL DEFAULT '1' AFTER vnf_id;"
-    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
-         "VALUES (24, '0.24', '0.5.21', 'Added vnfd fields', '2017-08-29');"
-}
-function downgrade_from_24(){
-    # echo "    downgrade database from version 0.24 to version 0.23"
-    echo "      Remove 'count' from table 'vms'"
-    sql "ALTER TABLE vms DROP COLUMN count;"
-    sql "DELETE FROM schema_version WHERE version_int='24';"
-}
-function upgrade_to_25(){
-    # echo "    upgrade database from version 0.24 to version 0.25"
-    echo "      Add 'osm_id','short_name','vendor' to tables 'vnfs', 'scenarios'"
-    for table in vnfs scenarios; do
-        sql "ALTER TABLE $table ADD COLUMN osm_id VARCHAR(255) NULL AFTER uuid, "\
-             "ADD UNIQUE INDEX osm_id_tenant_id (osm_id, tenant_id), "\
-             "ADD COLUMN short_name VARCHAR(255) NULL AFTER name, "\
-             "ADD COLUMN vendor VARCHAR(255) NULL AFTER description;"
-    done
-    sql "ALTER TABLE vnfs ADD COLUMN mgmt_access VARCHAR(2000) NULL AFTER vendor;"
-    sql "ALTER TABLE vms ADD COLUMN osm_id VARCHAR(255) NULL AFTER uuid;"
-    sql "ALTER TABLE sce_vnfs ADD COLUMN member_vnf_index SMALLINT(6) NULL DEFAULT NULL AFTER uuid;"
-    echo "      Add 'security_group' to table 'ip_profiles'"
-    sql "ALTER TABLE ip_profiles ADD COLUMN security_group VARCHAR(255) NULL DEFAULT NULL AFTER dhcp_count;"
-
-    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
-         "VALUES (25, '0.25', '0.5.22', 'Added osm_id to vnfs,scenarios', '2017-09-01');"
-}
-function downgrade_from_25(){
-    # echo "    downgrade database from version 0.25 to version 0.24"
-    echo "      Remove 'osm_id','short_name','vendor' from tables 'vnfs', 'scenarios'"
-    for table in vnfs scenarios; do
-        sql "ALTER TABLE $table DROP INDEX  osm_id_tenant_id, DROP COLUMN osm_id, "\
-             "DROP COLUMN short_name, DROP COLUMN vendor;"
-    done
-    sql "ALTER TABLE vnfs DROP COLUMN mgmt_access;"
-    sql "ALTER TABLE vms DROP COLUMN osm_id;"
-    sql "ALTER TABLE sce_vnfs DROP COLUMN member_vnf_index;"
-    echo "      Remove 'security_group' from table 'ip_profiles'"
-    sql "ALTER TABLE ip_profiles DROP COLUMN security_group;"
-
-    sql "DELETE FROM schema_version WHERE version_int='25';"
-}
-
-function upgrade_to_26(){
-    echo "      Add name to table datacenter_tenants"
-    sql "ALTER TABLE datacenter_tenants ADD COLUMN name VARCHAR(255) NULL AFTER uuid;"
-    sql "UPDATE datacenter_tenants as dt join datacenters as d on dt.datacenter_id = d.uuid set dt.name=d.name;"
-    echo "      Add 'SCHEDULED' to 'status' at tables 'instance_nets', 'instance_vms'"
-    sql "ALTER TABLE instance_vms CHANGE COLUMN status status ENUM('ACTIVE:NoMgmtIP','ACTIVE','INACTIVE','BUILD',"\
-         "'ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') "\
-         "NOT NULL DEFAULT 'BUILD';"
-    sql "ALTER TABLE instance_nets CHANGE COLUMN status status ENUM('ACTIVE','INACTIVE','DOWN','BUILD','ERROR',"\
-         "'VIM_ERROR','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD';"
-    echo "      Enlarge pci at instance_interfaces to allow extended pci for SDN por mapping"
-    sql "ALTER TABLE instance_interfaces CHANGE COLUMN pci pci VARCHAR(50) NULL DEFAULT NULL COMMENT 'PCI of the "\
-        "physical port in the host' AFTER compute_node;"
-
-    for t in flavor image; do
-        echo "      Change 'datacenters_${t}s' to point to datacenter_tenant, add status, vim_info"
-        sql "ALTER TABLE datacenters_${t}s ADD COLUMN datacenter_vim_id VARCHAR(36) NULL DEFAULT NULL AFTER "\
-            "datacenter_id, ADD COLUMN status ENUM('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','DELETED',"\
-            "'SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD' AFTER vim_id, ADD COLUMN vim_info "\
-            "TEXT NULL AFTER status;"
-        sql "UPDATE datacenters_${t}s as df left join datacenter_tenants as dt on dt.datacenter_id=df.datacenter_id "\
-            "set df.datacenter_vim_id=dt.uuid;"
-        sql "DELETE FROM datacenters_${t}s WHERE datacenter_vim_id is NULL;"
-        sql "ALTER TABLE datacenters_${t}s CHANGE COLUMN datacenter_vim_id datacenter_vim_id VARCHAR(36) NOT NULL;"
-        sql "ALTER TABLE datacenters_${t}s ADD CONSTRAINT FK_datacenters_${t}s_datacenter_tenants FOREIGN KEY "\
-            "(datacenter_vim_id) REFERENCES datacenter_tenants (uuid) ON UPDATE CASCADE ON DELETE CASCADE;"
-        sql "ALTER TABLE datacenters_${t}s DROP FOREIGN KEY FK__datacenters_${t:0:1};"
-        sql "ALTER TABLE datacenters_${t}s DROP COLUMN datacenter_id;"
-       done
-
-    echo "      Decoupling 'instance_interfaces' from scenarios/vnfs to allow scale actions"
-    sql "ALTER TABLE instance_interfaces CHANGE COLUMN vim_interface_id vim_interface_id VARCHAR(128) NULL DEFAULT NULL;"
-    sql "ALTER TABLE instance_interfaces CHANGE COLUMN interface_id interface_id VARCHAR(36) NULL DEFAULT NULL;"
-       sql "ALTER TABLE instance_interfaces DROP FOREIGN KEY FK_instance_ids"
-       sql "ALTER TABLE instance_interfaces ADD CONSTRAINT FK_instance_ids FOREIGN KEY (interface_id) "\
-           "REFERENCES interfaces (uuid) ON UPDATE CASCADE ON DELETE SET NULL;"
-
-    echo "      Decoupling 'instance_vms' from scenarios/vnfs to allow scale actions"
-    sql "ALTER TABLE instance_vms CHANGE COLUMN vim_vm_id vim_vm_id VARCHAR(128) NULL DEFAULT NULL;"
-    sql "ALTER TABLE instance_vms CHANGE COLUMN vm_id vm_id VARCHAR(36) NULL DEFAULT NULL;"
-       sql "ALTER TABLE instance_vms DROP FOREIGN KEY FK_instance_vms_vms;"
-       sql "ALTER TABLE instance_vms ADD CONSTRAINT FK_instance_vms_vms FOREIGN KEY (vm_id) "\
-           "REFERENCES vms (uuid) ON UPDATE CASCADE ON DELETE SET NULL;"
-
-    echo "      Decoupling 'instance_nets' from scenarios/vnfs to allow scale actions"
-    sql "ALTER TABLE instance_nets CHANGE COLUMN vim_net_id vim_net_id VARCHAR(128) NULL DEFAULT NULL;"
-
-    echo "      Decoupling 'instance_scenarios' from scenarios"
-    sql "ALTER TABLE instance_scenarios CHANGE COLUMN scenario_id scenario_id VARCHAR(36) NULL DEFAULT NULL;"
-       sql "ALTER TABLE instance_scenarios DROP FOREIGN KEY FK_instance_scenarios_scenarios;"
-       sql "ALTER TABLE instance_scenarios ADD CONSTRAINT FK_instance_scenarios_scenarios FOREIGN KEY (scenario_id) "\
-           "REFERENCES scenarios (uuid) ON UPDATE CASCADE ON DELETE SET NULL;"
-
-    echo "      Create table instance_actions, vim_actions"
-    sql "CREATE TABLE IF NOT EXISTS instance_actions (
-           uuid VARCHAR(36) NOT NULL,
-           tenant_id VARCHAR(36) NULL DEFAULT NULL,
-           instance_id VARCHAR(36) NULL DEFAULT NULL,
-           description VARCHAR(64) NULL DEFAULT NULL COMMENT 'CREATE, DELETE, SCALE OUT/IN, ...',
-           number_tasks SMALLINT(6) NOT NULL DEFAULT '1',
-           number_done SMALLINT(6) NOT NULL DEFAULT '0',
-           number_failed SMALLINT(6) NOT NULL DEFAULT '0',
-           created_at DOUBLE NOT NULL,
-           modified_at DOUBLE NULL DEFAULT NULL,
-           PRIMARY KEY (uuid),
-        INDEX FK_actions_tenants (tenant_id),
-       CONSTRAINT FK_actions_tenant FOREIGN KEY (tenant_id) REFERENCES nfvo_tenants (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
-               COMMENT='Contains client actions over instances'
-        COLLATE='utf8_general_ci'
-        ENGINE=InnoDB;"  
-
-    sql "CREATE TABLE IF NOT EXISTS vim_actions (
-           instance_action_id VARCHAR(36) NOT NULL,
-           task_index INT(6) NOT NULL,
-           datacenter_vim_id VARCHAR(36) NOT NULL,
-           vim_id VARCHAR(64) NULL DEFAULT NULL,
-           action VARCHAR(36) NOT NULL COMMENT 'CREATE,DELETE,START,STOP...',
-           item ENUM('datacenters_flavors','datacenter_images','instance_nets','instance_vms','instance_interfaces') NOT NULL COMMENT 'table where the item is stored',
-           item_id VARCHAR(36) NULL DEFAULT NULL COMMENT 'uuid of the entry in the table',
-           status ENUM('SCHEDULED', 'BUILD', 'DONE', 'FAILED', 'SUPERSEDED') NOT NULL DEFAULT 'SCHEDULED',
-           extra TEXT NULL DEFAULT NULL COMMENT 'json with params:, depends_on: for the task',
-           error_msg VARCHAR(1024) NULL DEFAULT NULL,
-           created_at DOUBLE NOT NULL,
-           modified_at DOUBLE NULL DEFAULT NULL,
-           PRIMARY KEY (task_index, instance_action_id),
-        INDEX FK_actions_instance_actions (instance_action_id),
-       CONSTRAINT FK_actions_instance_actions FOREIGN KEY (instance_action_id) REFERENCES instance_actions (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
-        INDEX FK_actions_vims (datacenter_vim_id),
-       CONSTRAINT FK_actions_vims FOREIGN KEY (datacenter_vim_id) REFERENCES datacenter_tenants (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
-        COMMENT='Table with the individual VIM actions.'
-        COLLATE='utf8_general_ci'
-        ENGINE=InnoDB;"  
-
-    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
-         "VALUES (26, '0.26', '0.5.23', 'Several changes', '2017-09-09');"
-}
-function downgrade_from_26(){
-    echo "      Remove name from table datacenter_tenants"
-    sql "ALTER TABLE datacenter_tenants DROP COLUMN name;"
-    echo "      Remove 'SCHEDULED' from the 'status' at tables 'instance_nets', 'instance_vms'"
-    sql "ALTER TABLE instance_vms CHANGE COLUMN status status ENUM('ACTIVE:NoMgmtIP','ACTIVE','INACTIVE','BUILD',"\
-         "'ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED') NOT NULL DEFAULT 'BUILD';"
-    sql "ALTER TABLE instance_nets CHANGE COLUMN status status ENUM('ACTIVE','DOWN','BUILD','ERROR','VIM_ERROR',"\
-         "'INACTIVE','DELETED') NOT NULL DEFAULT 'BUILD';"
-    echo "      Shorten back pci at instance_interfaces to allow extended pci for SDN por mapping"
-    sql "ALTER TABLE instance_interfaces CHANGE COLUMN pci pci VARCHAR(12) NULL DEFAULT NULL COMMENT 'PCI of the "\
-        "physical port in the host' AFTER compute_node;"
-
-    for t in flavor image; do
-        echo "      Restore back 'datacenters_${t}s'"
-        sql "ALTER TABLE datacenters_${t}s ADD COLUMN datacenter_id VARCHAR(36) NULL DEFAULT NULL AFTER "\
-            "${t}_id, DROP COLUMN status, DROP COLUMN vim_info ;"
-        sql "UPDATE datacenters_${t}s as df left join datacenter_tenants as dt on dt.uuid=df.datacenter_vim_id set "\
-            "df.datacenter_id=dt.datacenter_id;"
-        sql "ALTER TABLE datacenters_${t}s CHANGE COLUMN datacenter_id datacenter_id VARCHAR(36) NOT NULL;"
-        sql "ALTER TABLE datacenters_${t}s ADD CONSTRAINT FK__datacenters_${t:0:1} FOREIGN KEY "\
-            "(datacenter_id) REFERENCES datacenters (uuid), DROP FOREIGN KEY FK_datacenters_${t}s_datacenter_tenants, "\
-            "DROP COLUMN datacenter_vim_id;"
-    done
-
-    echo "      Restore back 'instance_interfaces' coupling to scenarios/vnfs"
-    sql "ALTER TABLE instance_interfaces CHANGE COLUMN vim_interface_id vim_interface_id VARCHAR(36) NULL DEFAULT NULL;"
-       sql "ALTER TABLE instance_interfaces DROP FOREIGN KEY FK_instance_ids"
-    sql "ALTER TABLE instance_interfaces CHANGE COLUMN interface_id interface_id VARCHAR(36) NOT NULL;"
-       sql "ALTER TABLE instance_interfaces ADD CONSTRAINT FK_instance_ids FOREIGN KEY (interface_id) "\
-           "REFERENCES interfaces (uuid);"
-
-    echo "      Restore back 'instance_vms' coupling to scenarios/vnfs"
-    echo "      Decoupling 'instance vms' from scenarios/vnfs to allow scale actions"
-    sql "UPDATE instance_vms SET vim_vm_id='' WHERE vim_vm_id is NULL;"
-    sql "ALTER TABLE instance_vms CHANGE COLUMN vim_vm_id vim_vm_id VARCHAR(36) NOT NULL;"
-       sql "ALTER TABLE instance_vms DROP FOREIGN KEY FK_instance_vms_vms;"
-    sql "ALTER TABLE instance_vms CHANGE COLUMN vm_id vm_id VARCHAR(36) NOT NULL;"
-       sql "ALTER TABLE instance_vms ADD CONSTRAINT FK_instance_vms_vms FOREIGN KEY (vm_id) "\
-           "REFERENCES vms (uuid);"
-
-    echo "      Restore back 'instance_nets' coupling to scenarios/vnfs"
-    sql "UPDATE instance_nets SET vim_net_id='' WHERE vim_net_id is NULL;"
-    sql "ALTER TABLE instance_nets CHANGE COLUMN vim_net_id vim_net_id VARCHAR(36) NOT NULL;"
-
-    echo "      Restore back  'instance_scenarios' coupling to scenarios"
-       sql "ALTER TABLE instance_scenarios DROP FOREIGN KEY FK_instance_scenarios_scenarios;"
-    sql "ALTER TABLE instance_scenarios CHANGE COLUMN scenario_id scenario_id VARCHAR(36) NOT NULL;"
-       sql "ALTER TABLE instance_scenarios ADD CONSTRAINT FK_instance_scenarios_scenarios FOREIGN KEY (scenario_id) "\
-           "REFERENCES scenarios (uuid);"
-
-    echo "      Delete table instance_actions"
-    sql "DROP TABLE IF EXISTS vim_actions"
-    sql "DROP TABLE IF EXISTS instance_actions"
-    sql "DELETE FROM schema_version WHERE version_int='26';"
-}
-
-function upgrade_to_27(){
-    echo "      Added 'encrypted_RO_priv_key','RO_pub_key' to table 'nfvo_tenants'"
-    sql "ALTER TABLE nfvo_tenants ADD COLUMN encrypted_RO_priv_key VARCHAR(2000) NULL AFTER description;"
-    sql "ALTER TABLE nfvo_tenants ADD COLUMN RO_pub_key VARCHAR(510) NULL AFTER encrypted_RO_priv_key;"
-
-    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
-         "VALUES (27, '0.27', '0.5.25', 'Added encrypted_RO_priv_key,RO_pub_key to table nfvo_tenants', '2017-09-29');"
-}
-function downgrade_from_27(){
-    echo "      Remove 'encrypted_RO_priv_key','RO_pub_key' from table 'nfvo_tenants'"
-    sql "ALTER TABLE nfvo_tenants DROP COLUMN encrypted_RO_priv_key;"
-    sql "ALTER TABLE nfvo_tenants DROP COLUMN RO_pub_key;"
-    sql "DELETE FROM schema_version WHERE version_int='27';"
-}
-function upgrade_to_28(){
-    echo "      [Adding necessary tables for VNFFG]"
-    echo "      Adding sce_vnffgs"
-    sql "CREATE TABLE IF NOT EXISTS sce_vnffgs (
-            uuid VARCHAR(36) NOT NULL,
-            tenant_id VARCHAR(36) NULL DEFAULT NULL,
-            name VARCHAR(255) NOT NULL,
-            description VARCHAR(255) NULL DEFAULT NULL,
-            vendor VARCHAR(255) NULL DEFAULT NULL,
-            scenario_id VARCHAR(36) NOT NULL,
-            created_at DOUBLE NOT NULL,
-            modified_at DOUBLE NULL DEFAULT NULL,
-        PRIMARY KEY (uuid),
-        INDEX FK_scenarios_sce_vnffg (scenario_id),
-        CONSTRAINT FK_scenarios_vnffg FOREIGN KEY (tenant_id) REFERENCES scenarios (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
-        COLLATE='utf8_general_ci'
-        ENGINE=InnoDB;"
-    echo "      Adding sce_rsps"
-    sql "CREATE TABLE IF NOT EXISTS sce_rsps (
-            uuid VARCHAR(36) NOT NULL,
-            tenant_id VARCHAR(36) NULL DEFAULT NULL,
-            name VARCHAR(255) NOT NULL,
-            sce_vnffg_id VARCHAR(36) NOT NULL,
-            created_at DOUBLE NOT NULL,
-            modified_at DOUBLE NULL DEFAULT NULL,
-        PRIMARY KEY (uuid),
-        INDEX FK_sce_vnffgs_rsp (sce_vnffg_id),
-        CONSTRAINT FK_sce_vnffgs_rsp FOREIGN KEY (sce_vnffg_id) REFERENCES sce_vnffgs (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
-        COLLATE='utf8_general_ci'
-        ENGINE=InnoDB;"
-    echo "      Adding sce_rsp_hops"
-    sql "CREATE TABLE IF NOT EXISTS sce_rsp_hops (
-            uuid VARCHAR(36) NOT NULL,
-            if_order INT DEFAULT 0 NOT NULL,
-            interface_id VARCHAR(36) NOT NULL,
-            sce_vnf_id VARCHAR(36) NOT NULL,
-            sce_rsp_id VARCHAR(36) NOT NULL,
-            created_at DOUBLE NOT NULL,
-            modified_at DOUBLE NULL DEFAULT NULL,
-        PRIMARY KEY (uuid),
-        INDEX FK_interfaces_rsp_hop (interface_id),
-        INDEX FK_sce_vnfs_rsp_hop (sce_vnf_id),
-        INDEX FK_sce_rsps_rsp_hop (sce_rsp_id),
-        CONSTRAINT FK_interfaces_rsp_hop FOREIGN KEY (interface_id) REFERENCES interfaces (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
-        CONSTRAINT FK_sce_vnfs_rsp_hop FOREIGN KEY (sce_vnf_id) REFERENCES sce_vnfs (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
-        CONSTRAINT FK_sce_rsps_rsp_hop FOREIGN KEY (sce_rsp_id) REFERENCES sce_rsps (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
-        COLLATE='utf8_general_ci'
-        ENGINE=InnoDB;"
-    echo "      Adding sce_classifiers"
-    sql "CREATE TABLE IF NOT EXISTS sce_classifiers (
-            uuid VARCHAR(36) NOT NULL,
-            tenant_id VARCHAR(36) NULL DEFAULT NULL,
-            name VARCHAR(255) NOT NULL,
-            sce_vnffg_id VARCHAR(36) NOT NULL,
-            sce_rsp_id VARCHAR(36) NOT NULL,
-            sce_vnf_id VARCHAR(36) NOT NULL,
-            interface_id VARCHAR(36) NOT NULL,
-            created_at DOUBLE NOT NULL,
-            modified_at DOUBLE NULL DEFAULT NULL,
-        PRIMARY KEY (uuid),
-        INDEX FK_sce_vnffgs_classifier (sce_vnffg_id),
-        INDEX FK_sce_rsps_classifier (sce_rsp_id),
-        INDEX FK_sce_vnfs_classifier (sce_vnf_id),
-        INDEX FK_interfaces_classifier (interface_id),
-        CONSTRAINT FK_sce_vnffgs_classifier FOREIGN KEY (sce_vnffg_id) REFERENCES sce_vnffgs (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
-        CONSTRAINT FK_sce_rsps_classifier FOREIGN KEY (sce_rsp_id) REFERENCES sce_rsps (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
-        CONSTRAINT FK_sce_vnfs_classifier FOREIGN KEY (sce_vnf_id) REFERENCES sce_vnfs (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
-        CONSTRAINT FK_interfaces_classifier FOREIGN KEY (interface_id) REFERENCES interfaces (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
-        COLLATE='utf8_general_ci'
-        ENGINE=InnoDB;"
-    echo "      Adding sce_classifier_matches"
-    sql "CREATE TABLE IF NOT EXISTS sce_classifier_matches (
-            uuid VARCHAR(36) NOT NULL,
-            ip_proto VARCHAR(2) NOT NULL,
-            source_ip VARCHAR(16) NOT NULL,
-            destination_ip VARCHAR(16) NOT NULL,
-            source_port VARCHAR(5) NOT NULL,
-            destination_port VARCHAR(5) NOT NULL,
-            sce_classifier_id VARCHAR(36) NOT NULL,
-            created_at DOUBLE NOT NULL,
-            modified_at DOUBLE NULL DEFAULT NULL,
-        PRIMARY KEY (uuid),
-        INDEX FK_classifiers_classifier_match (sce_classifier_id),
-        CONSTRAINT FK_sce_classifiers_classifier_match FOREIGN KEY (sce_classifier_id) REFERENCES sce_classifiers (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
-        COLLATE='utf8_general_ci'
-        ENGINE=InnoDB;"
-
-    echo "      [Adding necessary tables for VNFFG-SFC instance mapping]"
-    echo "      Adding instance_sfis"
-    sql "CREATE TABLE IF NOT EXISTS instance_sfis (
-          uuid varchar(36) NOT NULL,
-          instance_scenario_id varchar(36) NOT NULL,
-          vim_sfi_id varchar(36) DEFAULT NULL,
-          sce_rsp_hop_id varchar(36) DEFAULT NULL,
-          datacenter_id varchar(36) DEFAULT NULL,
-          datacenter_tenant_id varchar(36) DEFAULT NULL,
-          status enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
-          error_msg varchar(1024) DEFAULT NULL,
-          vim_info text,
-          created_at double NOT NULL,
-          modified_at double DEFAULT NULL,
-          PRIMARY KEY (uuid),
-      KEY FK_instance_sfis_instance_scenarios (instance_scenario_id),
-      KEY FK_instance_sfis_sce_rsp_hops (sce_rsp_hop_id),
-      KEY FK_instance_sfis_datacenters (datacenter_id),
-      KEY FK_instance_sfis_datacenter_tenants (datacenter_tenant_id),
-      CONSTRAINT FK_instance_sfis_datacenter_tenants FOREIGN KEY (datacenter_tenant_id) REFERENCES datacenter_tenants (uuid),
-      CONSTRAINT FK_instance_sfis_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid),
-      CONSTRAINT FK_instance_sfis_instance_scenarios FOREIGN KEY (instance_scenario_id) REFERENCES instance_scenarios (uuid) ON DELETE CASCADE ON UPDATE CASCADE,
-      CONSTRAINT FK_instance_sfis_sce_rsp_hops FOREIGN KEY (sce_rsp_hop_id) REFERENCES sce_rsp_hops (uuid) ON DELETE SET NULL ON UPDATE CASCADE)
-      COLLATE='utf8_general_ci'
-      ENGINE=InnoDB;"
-    echo "      Adding instance_sfs"
-    sql "CREATE TABLE IF NOT EXISTS instance_sfs (
-          uuid varchar(36) NOT NULL,
-          instance_scenario_id varchar(36) NOT NULL,
-          vim_sf_id varchar(36) DEFAULT NULL,
-          sce_rsp_hop_id varchar(36) DEFAULT NULL,
-          datacenter_id varchar(36) DEFAULT NULL,
-          datacenter_tenant_id varchar(36) DEFAULT NULL,
-          status enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
-          error_msg varchar(1024) DEFAULT NULL,
-          vim_info text,
-          created_at double NOT NULL,
-          modified_at double DEFAULT NULL,
-      PRIMARY KEY (uuid),
-      KEY FK_instance_sfs_instance_scenarios (instance_scenario_id),
-      KEY FK_instance_sfs_sce_rsp_hops (sce_rsp_hop_id),
-      KEY FK_instance_sfs_datacenters (datacenter_id),
-      KEY FK_instance_sfs_datacenter_tenants (datacenter_tenant_id),
-      CONSTRAINT FK_instance_sfs_datacenter_tenants FOREIGN KEY (datacenter_tenant_id) REFERENCES datacenter_tenants (uuid),
-      CONSTRAINT FK_instance_sfs_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid),
-      CONSTRAINT FK_instance_sfs_instance_scenarios FOREIGN KEY (instance_scenario_id) REFERENCES instance_scenarios (uuid) ON DELETE CASCADE ON UPDATE CASCADE,
-      CONSTRAINT FK_instance_sfs_sce_rsp_hops FOREIGN KEY (sce_rsp_hop_id) REFERENCES sce_rsp_hops (uuid) ON DELETE SET NULL ON UPDATE CASCADE)
-      COLLATE='utf8_general_ci'
-      ENGINE=InnoDB;"
-    echo "      Adding instance_classifications"
-    sql "CREATE TABLE IF NOT EXISTS instance_classifications (
-          uuid varchar(36) NOT NULL,
-          instance_scenario_id varchar(36) NOT NULL,
-          vim_classification_id varchar(36) DEFAULT NULL,
-          sce_classifier_match_id varchar(36) DEFAULT NULL,
-          datacenter_id varchar(36) DEFAULT NULL,
-          datacenter_tenant_id varchar(36) DEFAULT NULL,
-          status enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
-          error_msg varchar(1024) DEFAULT NULL,
-          vim_info text,
-          created_at double NOT NULL,
-          modified_at double DEFAULT NULL,
-      PRIMARY KEY (uuid),
-      KEY FK_instance_classifications_instance_scenarios (instance_scenario_id),
-      KEY FK_instance_classifications_sce_classifier_matches (sce_classifier_match_id),
-      KEY FK_instance_classifications_datacenters (datacenter_id),
-      KEY FK_instance_classifications_datacenter_tenants (datacenter_tenant_id),
-      CONSTRAINT FK_instance_classifications_datacenter_tenants FOREIGN KEY (datacenter_tenant_id) REFERENCES datacenter_tenants (uuid),
-      CONSTRAINT FK_instance_classifications_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid),
-      CONSTRAINT FK_instance_classifications_instance_scenarios FOREIGN KEY (instance_scenario_id) REFERENCES instance_scenarios (uuid) ON DELETE CASCADE ON UPDATE CASCADE,
-      CONSTRAINT FK_instance_classifications_sce_classifier_matches FOREIGN KEY (sce_classifier_match_id) REFERENCES sce_classifier_matches (uuid) ON DELETE SET NULL ON UPDATE CASCADE)
-      COLLATE='utf8_general_ci'
-      ENGINE=InnoDB;"
-    echo "      Adding instance_sfps"
-    sql "CREATE TABLE IF NOT EXISTS instance_sfps (
-          uuid varchar(36) NOT NULL,
-          instance_scenario_id varchar(36) NOT NULL,
-          vim_sfp_id varchar(36) DEFAULT NULL,
-          sce_rsp_id varchar(36) DEFAULT NULL,
-          datacenter_id varchar(36) DEFAULT NULL,
-          datacenter_tenant_id varchar(36) DEFAULT NULL,
-          status enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
-          error_msg varchar(1024) DEFAULT NULL,
-          vim_info text,
-          created_at double NOT NULL,
-          modified_at double DEFAULT NULL,
-      PRIMARY KEY (uuid),
-      KEY FK_instance_sfps_instance_scenarios (instance_scenario_id),
-      KEY FK_instance_sfps_sce_rsps (sce_rsp_id),
-      KEY FK_instance_sfps_datacenters (datacenter_id),
-      KEY FK_instance_sfps_datacenter_tenants (datacenter_tenant_id),
-      CONSTRAINT FK_instance_sfps_datacenter_tenants FOREIGN KEY (datacenter_tenant_id) REFERENCES datacenter_tenants (uuid),
-      CONSTRAINT FK_instance_sfps_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid),
-      CONSTRAINT FK_instance_sfps_instance_scenarios FOREIGN KEY (instance_scenario_id) REFERENCES instance_scenarios (uuid) ON DELETE CASCADE ON UPDATE CASCADE,
-      CONSTRAINT FK_instance_sfps_sce_rsps FOREIGN KEY (sce_rsp_id) REFERENCES sce_rsps (uuid) ON DELETE SET NULL ON UPDATE CASCADE)
-      COLLATE='utf8_general_ci'
-      ENGINE=InnoDB;"
-
-
-    echo "      [Altering vim_actions table]"
-    sql "ALTER TABLE vim_actions MODIFY COLUMN item ENUM('datacenters_flavors','datacenter_images','instance_nets','instance_vms','instance_interfaces','instance_sfis','instance_sfs','instance_classifications','instance_sfps') NOT NULL COMMENT 'table where the item is stored'"
-
-    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
-         "VALUES (28, '0.28', '0.5.28', 'Adding VNFFG-related tables', '2017-11-20');"
-}
-function downgrade_from_28(){
-    echo "      [Undo adding the VNFFG tables]"
-    echo "      Dropping instance_sfps"
-    sql "DROP TABLE IF EXISTS instance_sfps;"
-    echo "      Dropping sce_classifications"
-    sql "DROP TABLE IF EXISTS instance_classifications;"
-    echo "      Dropping instance_sfs"
-    sql "DROP TABLE IF EXISTS instance_sfs;"
-    echo "      Dropping instance_sfis"
-    sql "DROP TABLE IF EXISTS instance_sfis;"
-    echo "      Dropping sce_classifier_matches"
-    echo "      [Undo adding the VNFFG-SFC instance mapping tables]"
-    sql "DROP TABLE IF EXISTS sce_classifier_matches;"
-    echo "      Dropping sce_classifiers"
-    sql "DROP TABLE IF EXISTS sce_classifiers;"
-    echo "      Dropping sce_rsp_hops"
-    sql "DROP TABLE IF EXISTS sce_rsp_hops;"
-    echo "      Dropping sce_rsps"
-    sql "DROP TABLE IF EXISTS sce_rsps;"
-    echo "      Dropping sce_vnffgs"
-    sql "DROP TABLE IF EXISTS sce_vnffgs;"
-    echo "      [Altering vim_actions table]"
-    sql "ALTER TABLE vim_actions MODIFY COLUMN item ENUM('datacenters_flavors','datacenter_images','instance_nets','instance_vms','instance_interfaces') NOT NULL COMMENT 'table where the item is stored'"
-    sql "DELETE FROM schema_version WHERE version_int='28';"
-}
-function upgrade_to_29(){
-    echo "      Change 'member_vnf_index' from int to str at 'sce_vnfs'"
-    sql "ALTER TABLE sce_vnfs CHANGE COLUMN member_vnf_index member_vnf_index VARCHAR(255) NULL DEFAULT NULL AFTER uuid;"
-    echo "      Add osm_id to 'nets's and 'sce_nets'"
-    sql "ALTER TABLE nets ADD COLUMN osm_id VARCHAR(255) NULL AFTER uuid;"
-    sql "ALTER TABLE sce_nets ADD COLUMN osm_id VARCHAR(255) NULL AFTER uuid;"
-    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
-         "VALUES (29, '0.29', '0.5.59', 'Change member_vnf_index to str accordingly to the model', '2018-04-11');"
-}
-function downgrade_from_29(){
-    echo "      Change back 'member_vnf_index' from str to int at 'sce_vnfs'"
-    sql "ALTER TABLE sce_vnfs CHANGE COLUMN member_vnf_index member_vnf_index SMALLINT NULL DEFAULT NULL AFTER uuid;"
-    echo "      Remove osm_id from 'nets's and 'sce_nets'"
-    sql "ALTER TABLE nets DROP COLUMN osm_id;"
-    sql "ALTER TABLE sce_nets DROP COLUMN osm_id;"
-    sql "DELETE FROM schema_version WHERE version_int='29';"
-}
-function upgrade_to_30(){
-    echo "      Add 'image_list' at 'vms' to allocate alternative images"
-    sql "ALTER TABLE vms ADD COLUMN image_list TEXT NULL COMMENT 'Alternative images' AFTER image_id;"
-    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
-         "VALUES (30, '0.30', '0.5.60', 'Add image_list to vms', '2018-04-24');"
-}
-function downgrade_from_30(){
-    echo "      Remove back 'image_list' from 'vms' to allocate alternative images"
-    sql "ALTER TABLE vms DROP COLUMN image_list;"
-    sql "DELETE FROM schema_version WHERE version_int='30';"
-}
-function upgrade_to_31(){
-    echo "      Add 'vim_network_name' at 'sce_nets'"
-    sql "ALTER TABLE sce_nets ADD COLUMN vim_network_name VARCHAR(255) NULL DEFAULT NULL AFTER description;"
-    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
-         "VALUES (31, '0.31', '0.5.61', 'Add vim_network_name to sce_nets', '2018-05-03');"
-}
-function downgrade_from_31(){
-    echo "      Remove back 'vim_network_name' from 'sce_nets'"
-    sql "ALTER TABLE sce_nets DROP COLUMN vim_network_name;"
-    sql "DELETE FROM schema_version WHERE version_int='31';"
-}
-function upgrade_to_32(){
-    echo "      Add 'vim_name' to 'instance_vms'"
-    sql "ALTER TABLE instance_vms ADD COLUMN vim_name VARCHAR(255) NULL DEFAULT NULL AFTER vim_vm_id;"
-    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
-         "VALUES (32, '0.32', '0.5.70', 'Add vim_name to instance vms', '2018-06-28');"
-}
-function downgrade_from_32(){
-    echo "      Remove back 'vim_name' from 'instance_vms'"
-    sql "ALTER TABLE instance_vms DROP COLUMN vim_name;"
-    sql "DELETE FROM schema_version WHERE version_int='32';"
-}
-
-function upgrade_to_33(){
-    echo "      Add PDU information to 'vms'"
-    sql "ALTER TABLE vms ADD COLUMN pdu_type VARCHAR(255) NULL DEFAULT NULL AFTER osm_id;"
-    sql "ALTER TABLE instance_nets ADD COLUMN vim_name VARCHAR(255) NULL DEFAULT NULL AFTER vim_net_id;"
-    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
-         "VALUES (33, '0.33', '0.5.82', 'Add pdu information to vms', '2018-11-13');"
-}
-function downgrade_from_33(){
-    echo "      Remove back PDU information from 'vms'"
-    sql "ALTER TABLE vms DROP COLUMN pdu_type;"
-    sql "ALTER TABLE instance_nets DROP COLUMN vim_name;"
-    sql "DELETE FROM schema_version WHERE version_int='33';"
-}
-function upgrade_to_X(){
-    echo "      change 'datacenter_nets'"
-    sql "ALTER TABLE datacenter_nets ADD COLUMN vim_tenant_id VARCHAR(36) NOT NULL AFTER datacenter_id, DROP INDEX name_datacenter_id, ADD UNIQUE INDEX name_datacenter_id (name, datacenter_id, vim_tenant_id);"
-}
-function downgrade_from_X(){
-    echo "      Change back 'datacenter_nets'"
-    sql "ALTER TABLE datacenter_nets DROP COLUMN vim_tenant_id, DROP INDEX name_datacenter_id, ADD UNIQUE INDEX name_datacenter_id (name, datacenter_id);"
-}
-function upgrade_to_34() {
-    echo "      Create databases required for WIM features"
-    script="$(find "${DBUTILS}/migrations/up" -iname "34*.sql" | tail -1)"
-    sql "source ${script}"
-}
-function downgrade_from_34() {
-    echo "      Drop databases required for WIM features"
-    script="$(find "${DBUTILS}/migrations/down" -iname "34*.sql" | tail -1)"
-    sql "source ${script}"
-}
-function upgrade_to_35(){
-    echo "      Create databases required for WIM features"
-    script="$(find "${DBUTILS}/migrations/up" -iname "35*.sql" | tail -1)"
-    sql "source ${script}"
-}
-function downgrade_from_35(){
-    echo "      Drop databases required for WIM features"
-    script="$(find "${DBUTILS}/migrations/down" -iname "35*.sql" | tail -1)"
-    sql "source ${script}"
-}
-function upgrade_to_36(){
-    echo "      Allow null for image_id at 'vms'"
-    sql "ALTER TABLE vms ALTER image_id DROP DEFAULT;"
-    sql "ALTER TABLE vms CHANGE COLUMN image_id image_id VARCHAR(36) NULL COMMENT 'Link to image table' AFTER " \
-        "flavor_id;"
-    echo "      Enlarge config at 'wims' and 'wim_accounts'"
-    sql "ALTER TABLE wims CHANGE COLUMN config config TEXT NULL DEFAULT NULL AFTER wim_url;"
-    sql "ALTER TABLE wim_accounts CHANGE COLUMN config config TEXT NULL DEFAULT NULL AFTER password;"
-    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
-         "VALUES (36, '0.36', '0.6.03', 'Allow vm without image_id for PDUs', '2018-12-19');"
-}
-function downgrade_from_36(){
-    echo "      Force back not null for image_id at 'vms'"
-    sql "ALTER TABLE vms ALTER image_id DROP DEFAULT;"
-    sql "ALTER TABLE vms CHANGE COLUMN image_id image_id VARCHAR(36) NOT NULL COMMENT 'Link to image table' AFTER " \
-        "flavor_id;"
-    # For downgrade do not restore wims/wim_accounts config to varchar 4000
-    sql "DELETE FROM schema_version WHERE version_int='36';"
-}
-function upgrade_to_37(){
-    echo "      Adding the enum tags for SFC"
-    sql "ALTER TABLE vim_wim_actions " \
-        "MODIFY COLUMN item " \
-        "ENUM('datacenters_flavors','datacenter_images','instance_nets','instance_vms','instance_interfaces'," \
-            "'instance_sfis','instance_sfs','instance_classifications','instance_sfps','instance_wim_nets') " \
-        "NOT NULL COMMENT 'table where the item is stored';"
-    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) " \
-         "VALUES (37, '0.37', '0.6.09', 'Adding the enum tags for SFC', '2019-02-07');"
-}
-function downgrade_from_37(){
-    echo "      Adding the enum tags for SFC isn't going to be reversed"
-    # It doesn't make sense to reverse to a bug state.
-    sql "DELETE FROM schema_version WHERE version_int='37';"
-}
-function upgrade_to_38(){
-    echo "      Change vim_wim_actions, add worker, related"
-    sql "ALTER TABLE vim_wim_actions ADD COLUMN worker VARCHAR(64) NULL AFTER task_index, " \
-           "ADD COLUMN related VARCHAR(36) NULL AFTER worker, " \
-           "CHANGE COLUMN status status ENUM('SCHEDULED','BUILD','DONE','FAILED','SUPERSEDED','FINISHED') " \
-           "NOT NULL DEFAULT 'SCHEDULED' AFTER item_id;"
-       sql "UPDATE vim_wim_actions set related=item_id;"
-       echo "      Change DONE to FINISHED when DELETE has been completed"
-       sql "UPDATE vim_wim_actions as v1 join vim_wim_actions as v2 on (v1.action='CREATE' or v1.action='FIND') and " \
-           "v2.action='DELETE' and (v2.status='SUPERSEDED' or v2.status='DONE') and v1.item_id=v2.item_id " \
-        "SET v1.status='FINISHED', v2.status='FINISHED';"
-    echo "      Add osm_id to instance_nets"
-    sql "ALTER TABLE instance_nets ADD COLUMN osm_id VARCHAR(255) NULL AFTER uuid;"
-    echo "      Add related to instance_xxxx"
-    for table in instance_classifications instance_nets instance_sfis instance_sfps instance_sfs \
-        instance_vms
-    do
-        sql "ALTER TABLE $table ADD COLUMN related VARCHAR(36) NULL AFTER vim_info;"
-       sql "UPDATE $table set related=uuid;"
-    done
-    sql "ALTER TABLE instance_wim_nets ADD COLUMN related VARCHAR(36) NULL AFTER wim_info;"
-       sql "UPDATE instance_wim_nets set related=uuid;"
-
-    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) " \
-        "VALUES (38, '0.38', '0.6.11', 'Adding related to vim_wim_actions', '2019-03-07');"
-
-}
-function downgrade_from_38(){
-    echo "      Change vim_wim_actions, delete worker, related"
-       sql "UPDATE vim_wim_actions SET status='DONE' WHERE status='FINISHED';"
-    sql "ALTER TABLE vim_wim_actions DROP COLUMN worker, DROP COLUMN related, " \
-           "CHANGE COLUMN status status ENUM('SCHEDULED','BUILD','DONE','FAILED','SUPERSEDED') " \
-           "NOT NULL DEFAULT 'SCHEDULED' AFTER item_id;"
-    echo "      Remove related from instance_xxxx"
-    for table in instance_classifications instance_nets instance_wim_nets instance_sfis instance_sfps instance_sfs \
-        instance_vms
-    do
-        sql "ALTER TABLE $table DROP COLUMN related;"
-    done
-    echo "      Remove osm_id from instance_nets"
-    sql "ALTER TABLE instance_nets DROP COLUMN osm_id;"
-    sql "DELETE FROM schema_version WHERE version_int='38';"
-}
-
-function upgrade_to_39(){
-    echo "      Enlarge vim_id to 300 at all places"
-    sql "ALTER TABLE datacenters_flavors CHANGE COLUMN vim_id vim_id VARCHAR(300) NOT NULL AFTER datacenter_vim_id;"
-    sql "ALTER TABLE datacenters_images CHANGE COLUMN vim_id vim_id VARCHAR(300) NOT NULL AFTER datacenter_vim_id;"
-    sql "ALTER TABLE datacenter_nets CHANGE COLUMN vim_net_id vim_net_id VARCHAR(300) NOT NULL AFTER name;"
-    sql "ALTER TABLE instance_classifications CHANGE COLUMN vim_classification_id vim_classification_id VARCHAR(300)" \
-        " NULL DEFAULT NULL AFTER instance_scenario_id;"
-    sql "ALTER TABLE instance_interfaces CHANGE COLUMN vim_interface_id vim_interface_id VARCHAR(300) NULL DEFAULT " \
-        " NULL AFTER interface_id;"
-    sql "ALTER TABLE instance_nets CHANGE COLUMN vim_net_id vim_net_id VARCHAR(300) NULL DEFAULT NULL" \
-        " AFTER osm_id;"
-    sql "ALTER TABLE instance_sfis CHANGE COLUMN vim_sfi_id vim_sfi_id VARCHAR(300) NULL DEFAULT NULL" \
-        " AFTER instance_scenario_id;"
-    sql "ALTER TABLE instance_sfps CHANGE COLUMN vim_sfp_id vim_sfp_id VARCHAR(300) NULL DEFAULT NULL" \
-        " AFTER instance_scenario_id;"
-    sql "ALTER TABLE instance_sfs CHANGE COLUMN vim_sf_id vim_sf_id VARCHAR(300) NULL DEFAULT NULL" \
-        " AFTER instance_scenario_id;"
-    sql "ALTER TABLE instance_vms CHANGE COLUMN vim_vm_id vim_vm_id VARCHAR(300) NULL DEFAULT NULL" \
-        " AFTER instance_vnf_id, DROP INDEX vim_vm_id;"
-    sql "ALTER TABLE instance_wim_nets CHANGE COLUMN wim_internal_id wim_internal_id VARCHAR(300) NULL DEFAULT NULL" \
-        " COMMENT 'Internal ID used by the WIM to refer to the network' AFTER uuid;"
-    sql "ALTER TABLE vim_wim_actions CHANGE COLUMN vim_id vim_id VARCHAR(300) NULL DEFAULT NULL" \
-        " AFTER datacenter_vim_id;"
-
-    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) " \
-        "VALUES (39, '0.39', '0.6.20', 'Enlarge vim_id to 300 at all places', '2019-05-23');"
-}
-function downgrade_from_39(){
-    echo "      Set vim_id to original lenght at all places"
-    sql "ALTER TABLE datacenters_flavors CHANGE COLUMN vim_id vim_id VARCHAR(36) NOT NULL AFTER datacenter_vim_id;"
-    sql "ALTER TABLE datacenters_images CHANGE COLUMN vim_id vim_id VARCHAR(36) NOT NULL AFTER datacenter_vim_id;"
-    sql "ALTER TABLE datacenter_nets CHANGE COLUMN vim_net_id vim_net_id VARCHAR(36) NOT NULL AFTER name;"
-    sql "ALTER TABLE instance_classifications CHANGE COLUMN vim_classification_id vim_classification_id VARCHAR(36)" \
-        " NULL DEFAULT NULL AFTER instance_scenario_id;"
-    sql "ALTER TABLE instance_interfaces CHANGE COLUMN vim_interface_id vim_interface_id VARCHAR(128) NULL DEFAULT " \
-        " NULL AFTER interface_id;"
-    sql "ALTER TABLE instance_nets CHANGE COLUMN vim_net_id vim_net_id VARCHAR(128) NULL DEFAULT NULL" \
-        " AFTER osm_id;"
-    sql "ALTER TABLE instance_sfis CHANGE COLUMN vim_sfi_id vim_sfi_id VARCHAR(36) NULL DEFAULT NULL" \
-        " AFTER instance_scenario_id;"
-    sql "ALTER TABLE instance_sfps CHANGE COLUMN vim_sfp_id vim_sfp_id VARCHAR(36) NULL DEFAULT NULL" \
-        " AFTER instance_scenario_id;"
-    sql "ALTER TABLE instance_sfs CHANGE COLUMN vim_sf_id vim_sf_id VARCHAR(36) NULL DEFAULT NULL" \
-        " AFTER instance_scenario_id;"
-    sql "ALTER TABLE instance_vms CHANGE COLUMN vim_vm_id vim_vm_id VARCHAR(36) NULL DEFAULT NULL" \
-        " AFTER instance_vnf_id, ADD UNIQUE INDEX vim_vm_id (vim_vm_id);"
-    sql "ALTER TABLE instance_wim_nets CHANGE COLUMN wim_internal_id wim_internal_id VARCHAR(128) NULL DEFAULT NULL" \
-        " COMMENT 'Internal ID used by the WIM to refer to the network' AFTER uuid;"
-    sql "ALTER TABLE vim_wim_actions CHANGE COLUMN vim_id vim_id VARCHAR(64) NULL DEFAULT NULL" \
-        " AFTER datacenter_vim_id;"
-
-    sql "DELETE FROM schema_version WHERE version_int='39';"
-}
-#TODO ... put functions here
-
-
-function del_schema_version_process()
-{
-    echo "DELETE FROM schema_version WHERE version_int='0';" | $DBCMD ||
-        ! echo "    ERROR writing on schema_version" >&2 || exit 1
-}
-
-function set_schema_version_process()
-{
-    echo "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES "\
-        "(0, '0.0', '0.0.0', 'migration from $DATABASE_VER_NUM to $DB_VERSION backup: $BACKUP_FILE',"\
-        "'$(date +%Y-%m-%d)');" | $DBCMD ||
-        ! echo  "    Cannot set database at migration process writing into schema_version" >&2 || exit 1
-
-}
-
-function rollback_db()
-{
-    if echo $DATABASE_PROCESS | grep -q init ; then   # Empty database. No backup needed
-        echo "    Aborted! Rollback database not needed" && exit 1
-    else   # migration a non empty database or Recovering a migration process
-        cat $BACKUP_FILE | mysql $DEF_EXTRA_FILE_PARAM && echo "    Aborted! Rollback database OK" &&
-            del_schema_version_process && rm -f "$BACKUP_FILE" && exit 1
-        echo "    Aborted! Rollback database FAIL" && exit 1
-    fi
-}
-
-function sql()    # send a sql command
-{
-    echo "$*" | $DBCMD || ! echo "    ERROR with command '$*'" || rollback_db
-    return 0
-}
-
-function migrate()
-{
-    #UPGRADE DATABASE step by step
-    while [ $DB_VERSION -gt $DATABASE_VER_NUM ]
-    do
-        echo "    upgrade database from version '$DATABASE_VER_NUM' to '$((DATABASE_VER_NUM+1))'"
-        DATABASE_VER_NUM=$((DATABASE_VER_NUM+1))
-        upgrade_to_${DATABASE_VER_NUM}
-        #FILE_="${DIRNAME}/upgrade_to_${DATABASE_VER_NUM}.sh"
-        #[ ! -x "$FILE_" ] && echo "Error, can not find script '$FILE_' to upgrade" >&2 && exit -1
-        #$FILE_ || exit -1  # if fail return
-    done
-
-    #DOWNGRADE DATABASE step by step
-    while [ $DB_VERSION -lt $DATABASE_VER_NUM ]
-    do
-        echo "    downgrade database from version '$DATABASE_VER_NUM' to '$((DATABASE_VER_NUM-1))'"
-        #FILE_="${DIRNAME}/downgrade_from_${DATABASE_VER_NUM}.sh"
-        #[ ! -x "$FILE_" ] && echo "Error, can not find script '$FILE_' to downgrade" >&2 && exit -1
-        #$FILE_ || exit -1  # if fail return
-        downgrade_from_${DATABASE_VER_NUM}
-        DATABASE_VER_NUM=$((DATABASE_VER_NUM-1))
-    done
-}
-
-
-# check if current database is ok
-function check_migration_needed()
-{
-    DATABASE_VER_NUM=`echo "select max(version_int) from schema_version;" | $DBCMD | tail -n+2` ||
-    ! echo "    ERROR cannot read from schema_version" || exit 1
-
-    if [[ -z "$DATABASE_VER_NUM" ]] || [[ "$DATABASE_VER_NUM" -lt 0 ]] || [[ "$DATABASE_VER_NUM" -gt 100 ]] ; then
-        echo "    Error can not get database version ($DATABASE_VER_NUM?)" >&2
-        exit 1
-    fi
-
-    [[ $DB_VERSION -eq $DATABASE_VER_NUM ]] && echo "    current database version '$DATABASE_VER_NUM' is ok" && return 1
-    [[ "$DATABASE_VER_NUM" -gt "$LAST_DB_VERSION" ]] &&
-        echo "Database has been upgraded with a newer version of this script. Use this version to downgrade" >&2 &&
-        exit 1
-    return 0
-}
-
-DATABASE_PROCESS=`echo "select comments from schema_version where version_int=0;" | $DBCMD | tail -n+2` ||
-    ! echo "    ERROR cannot read from schema_version" || exit 1
-if [[ -z "$DATABASE_PROCESS" ]] ; then  # migration a non empty database
-    check_migration_needed || exit 0
-    # Create a backup database content
-    [[ -n "$BACKUP_DIR" ]] && BACKUP_FILE=$(mktemp -q  "${BACKUP_DIR}/backupdb.XXXXXX.sql")
-    [[ -z "$BACKUP_DIR" ]] && BACKUP_FILE=$(mktemp -q --tmpdir "backupdb.XXXXXX.sql")
-    mysqldump $DEF_EXTRA_FILE_PARAM --add-drop-table --add-drop-database --routines --databases $DBNAME > $BACKUP_FILE ||
-        ! echo "Cannot create Backup file '$BACKUP_FILE'" >&2 || exit 1
-    echo "    Backup file '$BACKUP_FILE' created"
-    # Set schema version
-    set_schema_version_process
-    migrate
-    del_schema_version_process
-    rm -f "$BACKUP_FILE"
-elif echo $DATABASE_PROCESS | grep -q init ; then   # Empty database. No backup needed
-    echo "    Migrating an empty database"
-    if check_migration_needed ; then
-        migrate
-    fi
-    del_schema_version_process
-
-else  # Recover Migration process
-    BACKUP_FILE=${DATABASE_PROCESS##*backup: }
-    [[ -f "$BACKUP_FILE" ]] || ! echo "Previous migration process fail and cannot recover backup file '$BACKUP_FILE'" >&2 ||
-        exit 1
-    echo "    Previous migration was killed. Restoring database from rollback file'$BACKUP_FILE'"
-    cat $BACKUP_FILE | mysql $DEF_EXTRA_FILE_PARAM || ! echo "    Cannot load backup file '$BACKUP_FILE'" >&2 || exit 1
-    if check_migration_needed ; then
-        set_schema_version_process
-        migrate
-    fi
-    del_schema_version_process
-    rm -f "$BACKUP_FILE"
-fi
-exit 0
-
-#echo done
-
diff --git a/database_utils/migrations/down/34_remove_wim_tables.sql b/database_utils/migrations/down/34_remove_wim_tables.sql
deleted file mode 100644 (file)
index 4400e39..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
---
--- Tear down database structure required for integrating OSM with
--- Wide Are Network Infrastructure Managers
---
-
-DROP TABLE IF EXISTS wim_port_mappings;
-DROP TABLE IF EXISTS wim_nfvo_tenants;
-DROP TABLE IF EXISTS instance_wim_nets;
-
-ALTER TABLE `vim_wim_actions` DROP FOREIGN KEY `FK_actions_wims`;
-ALTER TABLE `vim_wim_actions` DROP INDEX `FK_actions_wims`;
-ALTER TABLE `vim_wim_actions` DROP INDEX `item_type_id`;
-ALTER TABLE `vim_wim_actions` MODIFY `item` enum(
-  'datacenters_flavors',
-  'datacenter_images',
-  'instance_nets',
-  'instance_vms',
-  'instance_interfaces',
-  'instance_sfis',
-  'instance_sfs',
-  'instance_classifications',
-  'instance_sfps') NOT NULL
-  COMMENT 'table where the item is stored';
-ALTER TABLE `vim_wim_actions` MODIFY `datacenter_vim_id` varchar(36) NOT NULL;
-ALTER TABLE `vim_wim_actions` DROP `wim_internal_id`, DROP `wim_account_id`;
-ALTER TABLE `vim_wim_actions` RENAME TO `vim_actions`;
-
-DROP TABLE IF EXISTS wim_accounts;
-DROP TABLE IF EXISTS wims;
-
-DELETE FROM schema_version WHERE version_int='34';
diff --git a/database_utils/migrations/down/35_remove_sfc_ingress_and_egress.sql b/database_utils/migrations/down/35_remove_sfc_ingress_and_egress.sql
deleted file mode 100644 (file)
index 01f38f4..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
---
--- Removing ingress and egress ports for SFC purposes.
--- Inserting only one port for ingress and egress.
---
-
-ALTER TABLE sce_rsp_hops
-  DROP FOREIGN KEY FK_interfaces_rsp_hop_ingress,
-  CHANGE COLUMN ingress_interface_id interface_id VARCHAR(36) NOT NULL
-    AFTER if_order,
-  ADD CONSTRAINT FK_interfaces_rsp_hop
-    FOREIGN KEY (interface_id)
-    REFERENCES interfaces (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
-  DROP FOREIGN KEY FK_interfaces_rsp_hop_egress,
-  DROP COLUMN egress_interface_id;
-
-DELETE FROM schema_version WHERE version_int='35';
diff --git a/database_utils/migrations/up/34_add_wim_tables.sql b/database_utils/migrations/up/34_add_wim_tables.sql
deleted file mode 100644 (file)
index 343f370..0000000
+++ /dev/null
@@ -1,169 +0,0 @@
---
--- Setup database structure required for integrating OSM with
--- Wide Are Network Infrastructure Managers
---
-
-DROP TABLE IF EXISTS wims;
-CREATE TABLE wims (
-  `uuid` varchar(36) NOT NULL,
-  `name` varchar(255) NOT NULL,
-  `description` varchar(255) DEFAULT NULL,
-  `type` varchar(36) NOT NULL DEFAULT 'odl',
-  `wim_url` varchar(150) NOT NULL,
-  `config` varchar(4000) DEFAULT NULL,
-  `created_at` double NOT NULL,
-  `modified_at` double DEFAULT NULL,
-  PRIMARY KEY (`uuid`),
-  UNIQUE KEY `name` (`name`)
-)
-ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT
-COMMENT='WIMs managed by the NFVO.';
-
-DROP TABLE IF EXISTS wim_accounts;
-CREATE TABLE wim_accounts (
-  `uuid` varchar(36) NOT NULL,
-  `name` varchar(255) DEFAULT NULL,
-  `wim_id` varchar(36) NOT NULL,
-  `created` enum('true','false') NOT NULL DEFAULT 'false',
-  `user` varchar(64) DEFAULT NULL,
-  `password` varchar(64) DEFAULT NULL,
-  `config` varchar(4000) DEFAULT NULL,
-  `created_at` double NOT NULL,
-  `modified_at` double DEFAULT NULL,
-  PRIMARY KEY (`uuid`),
-  UNIQUE KEY `wim_name` (`wim_id`,`name`),
-  KEY `FK_wim_accounts_wims` (`wim_id`),
-  CONSTRAINT `FK_wim_accounts_wims` FOREIGN KEY (`wim_id`)
-    REFERENCES `wims` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
-)
-ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT
-COMMENT='WIM accounts by the user';
-
-DROP TABLE IF EXISTS `wim_nfvo_tenants`;
-CREATE TABLE `wim_nfvo_tenants` (
-  `id` integer NOT NULL AUTO_INCREMENT,
-  `nfvo_tenant_id` varchar(36) NOT NULL,
-  `wim_id` varchar(36) NOT NULL,
-  `wim_account_id` varchar(36) NOT NULL,
-  `created_at` double NOT NULL,
-  `modified_at` double DEFAULT NULL,
-  PRIMARY KEY (`id`),
-  UNIQUE KEY `wim_nfvo_tenant` (`wim_id`,`nfvo_tenant_id`),
-  KEY `FK_wims_nfvo_tenants` (`wim_id`),
-  KEY `FK_wim_accounts_nfvo_tenants` (`wim_account_id`),
-  KEY `FK_nfvo_tenants_wim_accounts` (`nfvo_tenant_id`),
-  CONSTRAINT `FK_wims_nfvo_tenants` FOREIGN KEY (`wim_id`)
-    REFERENCES `wims` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
-  CONSTRAINT `FK_wim_accounts_nfvo_tenants` FOREIGN KEY (`wim_account_id`)
-    REFERENCES `wim_accounts` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
-  CONSTRAINT `FK_nfvo_tenants_wim_accounts` FOREIGN KEY (`nfvo_tenant_id`)
-    REFERENCES `nfvo_tenants` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
-)
-ENGINE=InnoDB AUTO_INCREMENT=86 DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT
-COMMENT='WIM accounts mapping to NFVO tenants';
-
-DROP TABLE IF EXISTS `instance_wim_nets`;
-CREATE TABLE `instance_wim_nets` (
-  `uuid` varchar(36) NOT NULL,
-  `wim_internal_id` varchar(128) DEFAULT NULL
-    COMMENT 'Internal ID used by the WIM to refer to the network',
-  `instance_scenario_id` varchar(36) DEFAULT NULL,
-  `sce_net_id` varchar(36) DEFAULT NULL,
-  `wim_id` varchar(36) DEFAULT NULL,
-  `wim_account_id` varchar(36) NOT NULL,
-  `status` enum(
-    'ACTIVE',
-    'INACTIVE',
-    'DOWN',
-    'BUILD',
-    'ERROR',
-    'WIM_ERROR',
-    'DELETED',
-    'SCHEDULED_CREATION',
-    'SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
-  `error_msg` varchar(1024) DEFAULT NULL,
-  `wim_info` text,
-  `multipoint` enum('true','false') NOT NULL DEFAULT 'false',
-  `created` enum('true','false') NOT NULL DEFAULT 'false'
-      COMMENT 'Created or already exists at WIM',
-  `created_at` double NOT NULL,
-  `modified_at` double DEFAULT NULL,
-  PRIMARY KEY (`uuid`),
-  KEY `FK_instance_wim_nets_instance_scenarios` (`instance_scenario_id`),
-  KEY `FK_instance_wim_nets_sce_nets` (`sce_net_id`),
-  KEY `FK_instance_wim_nets_wims` (`wim_id`),
-  KEY `FK_instance_wim_nets_wim_accounts` (`wim_account_id`),
-  CONSTRAINT `FK_instance_wim_nets_wim_accounts`
-    FOREIGN KEY (`wim_account_id`) REFERENCES `wim_accounts` (`uuid`),
-  CONSTRAINT `FK_instance_wim_nets_wims`
-    FOREIGN KEY (`wim_id`) REFERENCES `wims` (`uuid`),
-  CONSTRAINT `FK_instance_wim_nets_instance_scenarios`
-    FOREIGN KEY (`instance_scenario_id`) REFERENCES `instance_scenarios` (`uuid`)
-    ON DELETE CASCADE ON UPDATE CASCADE,
-  CONSTRAINT `FK_instance_wim_nets_sce_nets`
-    FOREIGN KEY (`sce_net_id`) REFERENCES `sce_nets` (`uuid`)
-    ON DELETE SET NULL ON UPDATE CASCADE
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT
-  COMMENT='Instances of wim networks';
-
-ALTER TABLE `vim_actions`
-  RENAME TO `vim_wim_actions`;
-ALTER TABLE `vim_wim_actions`
-  ADD `wim_account_id` varchar(36) DEFAULT NULL AFTER `vim_id`,
-  ADD `wim_internal_id` varchar(64) DEFAULT NULL AFTER `wim_account_id`,
-  MODIFY `datacenter_vim_id` varchar(36) DEFAULT NULL,
-  MODIFY `item` enum(
-    'datacenters_flavors',
-    'datacenter_images',
-    'instance_nets',
-    'instance_vms',
-    'instance_interfaces',
-    'instance_sfis',
-    'instance_sfs',
-    'instance_classifications',
-    'instance_sfps',
-    'instance_wim_nets') NOT NULL
-  COMMENT 'table where the item is stored';
-ALTER TABLE `vim_wim_actions`
-  ADD INDEX `item_type_id` (`item`, `item_id`);
-ALTER TABLE `vim_wim_actions`
-  ADD INDEX `FK_actions_wims` (`wim_account_id`);
-ALTER TABLE `vim_wim_actions`
-  ADD CONSTRAINT `FK_actions_wims` FOREIGN KEY (`wim_account_id`)
-  REFERENCES `wim_accounts` (`uuid`)
-  ON UPDATE CASCADE ON DELETE CASCADE;
-
-DROP TABLE IF EXISTS `wim_port_mappings`;
-CREATE TABLE `wim_port_mappings` (
-  `id` integer NOT NULL AUTO_INCREMENT,
-  `wim_id` varchar(36) NOT NULL,
-  `datacenter_id` varchar(36) NOT NULL,
-  `pop_switch_dpid` varchar(64) NOT NULL,
-  `pop_switch_port` varchar(64) NOT NULL,
-  `wan_service_endpoint_id` varchar(256) NOT NULL
-      COMMENT 'In case the WIM plugin relies on the wan_service_mapping_info'
-      COMMENT 'this field contains a unique identifier used to check the mapping_info consistency',
-      /* In other words: wan_service_endpoint_id = f(wan_service_mapping_info)
-       * where f is a injective function'
-       */
-  `wan_service_mapping_info` text,
-  `created_at` double NOT NULL,
-  `modified_at` double DEFAULT NULL,
-  PRIMARY KEY (`id`),
-  UNIQUE KEY `unique_datacenter_port_mapping`
-    (`datacenter_id`, `pop_switch_dpid`, `pop_switch_port`),
-  UNIQUE KEY `unique_wim_port_mapping`
-    (`wim_id`, `wan_service_endpoint_id`),
-  KEY `FK_wims_wim_physical_connections` (`wim_id`),
-  KEY `FK_datacenters_wim_port_mappings` (`datacenter_id`),
-  CONSTRAINT `FK_wims_wim_port_mappings` FOREIGN KEY (`wim_id`)
-    REFERENCES `wims` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
-  CONSTRAINT `FK_datacenters_wim_port_mappings` FOREIGN KEY (`datacenter_id`)
-    REFERENCES `datacenters` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
-)
-ENGINE=InnoDB DEFAULT CHARSET=utf8
-COMMENT='WIM port mappings managed by the WIM.';
-
--- Update Schema with DB version
-INSERT INTO schema_version
-VALUES (34, '0.34', '0.6.00', 'Added WIM tables', '2018-09-10');
diff --git a/database_utils/migrations/up/35_add_sfc_ingress_and_egress.sql b/database_utils/migrations/up/35_add_sfc_ingress_and_egress.sql
deleted file mode 100644 (file)
index b528c6d..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
---
--- Adding different ingress and egress ports for SFC.
---
-
-ALTER TABLE sce_rsp_hops
-  DROP FOREIGN KEY FK_interfaces_rsp_hop,
-  CHANGE COLUMN interface_id ingress_interface_id VARCHAR(36) NOT NULL
-    AFTER if_order,
-  ADD CONSTRAINT FK_interfaces_rsp_hop_ingress
-    FOREIGN KEY (ingress_interface_id)
-    REFERENCES interfaces (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
-  ADD COLUMN egress_interface_id VARCHAR(36) NULL DEFAULT NULL
-    AFTER ingress_interface_id;
-
-UPDATE sce_rsp_hops
-  SET egress_interface_id = ingress_interface_id;
-
-ALTER TABLE sce_rsp_hops
-  ALTER COLUMN egress_interface_id DROP DEFAULT;
-
-ALTER TABLE sce_rsp_hops
-  MODIFY COLUMN egress_interface_id VARCHAR(36) NOT NULL
-    AFTER ingress_interface_id,
-  ADD CONSTRAINT FK_interfaces_rsp_hop_egress
-    FOREIGN KEY (egress_interface_id)
-    REFERENCES interfaces (uuid) ON UPDATE CASCADE ON DELETE CASCADE;
-
-INSERT INTO schema_version (version_int, version, openmano_ver, comments, date)
-  VALUES (35, '0.35', '0.6.02', 'Adding ingress and egress ports for RSPs', '2018-12-11');
index cc1cfc0..2bc3e4d 100755 (executable)
@@ -1,8 +1,22 @@
 #!/bin/sh
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 rm -rf pool
 rm -rf dists
 mkdir -p pool/RO
-mv .build/*.deb pool/RO/
+mv deb_dist/*.deb pool/RO/
 mkdir -p dists/unstable/RO/binary-amd64/
 apt-ftparchive packages pool/RO > dists/unstable/RO/binary-amd64/Packages
 gzip -9fk dists/unstable/RO/binary-amd64/Packages
index 25de71a..b1640eb 100755 (executable)
@@ -1,4 +1,53 @@
-#!/bin/sh
-make clean all BRANCH=master
-#make install && \
-#make test
+#!/bin/bash
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+rm -rf deb_dist/*
+mkdir -p deb_dist
+
+# main RO module
+make -C RO clean package BRANCH=master
+cp RO/deb_dist/python3-osm-ro_*.deb deb_dist/
+
+# RO client
+make -C RO-client clean package
+cp RO-client/deb_dist/python3-osm-roclient_*.deb deb_dist/
+
+# VIM vmware plugin
+make -C RO-VIM-vmware clean package
+cp RO-VIM-vmware/deb_dist/python3-osm-rovim-vmware_*.deb deb_dist/
+
+# VIM Openstack plugin
+make -C RO-VIM-openstack clean package
+cp RO-VIM-openstack/deb_dist/python3-osm-rovim-openstack_*.deb deb_dist/
+
+# VIM Openvim plugin
+make -C RO-VIM-openvim clean package
+cp RO-VIM-openvim/deb_dist/python3-osm-rovim-openvim_*.deb deb_dist/
+
+# VIM AWS plugin
+make -C RO-VIM-aws clean package
+cp RO-VIM-aws/deb_dist/python3-osm-rovim-aws_*.deb deb_dist/
+
+# VIM fos plugin
+make -C RO-VIM-fos clean package
+cp RO-VIM-fos/deb_dist/python3-osm-rovim-fos_*.deb deb_dist/
+
+# VIM azure plugin
+make -C RO-VIM-azure clean package
+cp RO-VIM-azure/deb_dist/python3-osm-rovim-azure_*.deb deb_dist/
+
+# VIM Opennebula plugin
+make -C RO-VIM-opennebula clean package
+cp RO-VIM-opennebula/deb_dist/python3-osm-rovim-opennebula_*.deb deb_dist/
index cb72fb1..13cef85 100755 (executable)
@@ -13,6 +13,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-flake8 osm_ro/wim osm_ro/vim_thread.py --max-line-length 120 \
+flake8 RO/osm_ro/wim RO/osm_ro/vim_thread.py --max-line-length 120 \
     --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp,osm_im --ignore W291,W293,E226,E402,W504
 
diff --git a/docker/Dockerfile-local b/docker/Dockerfile-local
deleted file mode 100644 (file)
index 1dff02d..0000000
+++ /dev/null
@@ -1,92 +0,0 @@
-##
-# Copyright {yyyy} {name of copyright owner}
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-##
-
-########################################################################
-
-from ubuntu:18.04
-
-LABEL authors="Gennadiy Dubina, Alfonso Tierno, Gerardo Garcia"
-
-RUN apt-get update && \
-    DEBIAN_FRONTEND=noninteractive apt-get -y install software-properties-common && \
-    DEBIAN_FRONTEND=noninteractive apt-get update && \
-    DEBIAN_FRONTEND=noninteractive apt-get -y install git python python-pip && \
-    DEBIAN_FRONTEND=noninteractive apt-get -y install wget tox && \
-    DEBIAN_FRONTEND=noninteractive pip2 install pip==9.0.3 && \
-    DEBIAN_FRONTEND=noninteractive pip2 install -U progressbar pyvmomi pyvcloud==19.1.1 && \
-    DEBIAN_FRONTEND=noninteractive pip2 install -U fog05rest && \
-    DEBIAN_FRONTEND=noninteractive pip2 install -U azure && \
-    DEBIAN_FRONTEND=noninteractive apt-get -y install python-requests && \
-    DEBIAN_FRONTEND=noninteractive apt-get -y install python-novaclient python-keystoneclient python-glanceclient python-cinderclient python-neutronclient python-networking-l2gw && \
-    DEBIAN_FRONTEND=noninteractive apt-get -y install python-cffi libmysqlclient-dev libssl-dev libffi-dev python-mysqldb && \
-    DEBIAN_FRONTEND=noninteractive apt-get -y install python-openstacksdk python-openstackclient && \
-    DEBIAN_FRONTEND=noninteractive apt-get -y install python-networkx && \
-    DEBIAN_FRONTEND=noninteractive apt-get -y install genisoimage && \
-    DEBIAN_FRONTEND=noninteractive pip2 install untangle && \
-    DEBIAN_FRONTEND=noninteractive pip2 install pyone && \
-    DEBIAN_FRONTEND=noninteractive pip2 install -e git+https://github.com/python-oca/python-oca#egg=oca && \
-    DEBIAN_FRONTEND=noninteractive apt-get -y install mysql-client
-
-COPY . /root/RO
-
-RUN /root/RO/scripts/install-osm-im.sh --develop && \
-    /root/RO/scripts/install-lib-osm-openvim.sh --develop && \
-    make -C /root/RO prepare && \
-    mkdir -p /var/log/osm && \
-    pip2 install -e /root/RO/build && \
-    rm -rf /root/.cache && \
-    apt-get clean && \
-    rm -rf /var/lib/apt/lists/*
-
-VOLUME /var/log/osm
-
-EXPOSE 9090
-
-# Two mysql databases are needed (DB and DB_OVIM). Can be hosted on same or separated containers
-# These ENV must be provided
-# RO_DB_HOST: host of the main
-# RO_DB_OVIM_HOST: ...        if empty RO_DB_HOST is assumed
-# RO_DB_ROOT_PASSWORD: this has to be provided first time for creating database. It will create and init only if empty!
-# RO_DB_OVIM_ROOT_PASSWORD: ...  if empty RO_DB_ROOT_PASSWORD is assumed
-# RO_DB_USER:    default value 'mano'
-# RO_DB_OVIM_USER:       default value 'mano'
-# RO_DB_PASSWORD:        default value 'manopw'
-# RO_DB_OVIM_PASSWORD:        default value 'manopw'
-# RO_DB_PORT:             default value '3306'
-# RO_DB_OVIM_PORT:        default value '3306'
-# RO_DB_NAME:             default value 'mano_db'
-# RO_DB_OVIM_NAME:        default value 'mano_vim_db'
-# RO_LOG_FILE:            default log to stderr if not defined
-
-ENV RO_DB_HOST="" \
-    RO_DB_OVIM_HOST="" \
-    RO_DB_ROOT_PASSWORD="" \
-    RO_DB_OVIM_ROOT_PASSWORD="" \
-    RO_DB_USER=mano \
-    RO_DB_OVIM_USER=mano \
-    RO_DB_PASSWORD=manopw \
-    RO_DB_OVIM_PASSWORD=manopw \
-    RO_DB_PORT=3306 \
-    RO_DB_OVIM_PORT=3306 \
-    RO_DB_NAME=mano_db \
-    RO_DB_OVIM_NAME=mano_vim_db \
-    OPENMANO_TENANT=osm \
-    RO_LOG_LEVEL=DEBUG
-
-CMD RO-start.sh
-
-# HEALTHCHECK --start-period=30s --interval=10s --timeout=5s --retries=12 \
-#  CMD curl --silent --fail localhost:9090/openmano/tenants || exit 1
diff --git a/openmano b/openmano
deleted file mode 100755 (executable)
index 13a93da..0000000
--- a/openmano
+++ /dev/null
@@ -1,2502 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# PYTHON_ARGCOMPLETE_OK
-
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-"""
-openmano client used to interact with openmano-server (openmanod)
-"""
-__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes"
-__date__ = "$09-oct-2014 09:09:48$"
-__version__ = "0.4.24-r534"
-version_date = "Nov 2018"
-
-from argcomplete.completers import FilesCompleter
-import os
-import argparse
-import argcomplete
-import requests
-import json
-import yaml
-import logging
-#from jsonschema import validate as js_v, exceptions as js_e
-
-class ArgumentParserError(Exception): pass
-
-class OpenmanoCLIError(Exception): pass
-
-class ThrowingArgumentParser(argparse.ArgumentParser):
-    def error(self, message):
-        print "Error: %s" %message
-        print
-        self.print_usage()
-        #self.print_help()
-        print
-        print "Type 'openmano -h' for help"
-        raise ArgumentParserError
-
-
-def config(args):
-    print "OPENMANO_HOST: %s" %mano_host
-    print "OPENMANO_PORT: %s" %mano_port
-    if args.n:
-        logger.debug("resolving tenant and datacenter names")
-        mano_tenant_id = "None"
-        mano_tenant_name = "None"
-        mano_datacenter_id = "None"
-        mano_datacenter_name = "None"
-        # WIM additions
-        logger.debug("resolving WIM names")
-        mano_wim_id = "None"
-        mano_wim_name = "None"
-        try:
-            mano_tenant_id = _get_item_uuid("tenants", mano_tenant)
-            URLrequest = "http://%s:%s/openmano/tenants/%s" %(mano_host, mano_port, mano_tenant_id)
-            mano_response = requests.get(URLrequest)
-            logger.debug("openmano response: %s", mano_response.text )
-            content = mano_response.json()
-            mano_tenant_name = content["tenant"]["name"]
-            URLrequest = "http://%s:%s/openmano/%s/datacenters/%s" %(mano_host, mano_port, mano_tenant_id, mano_datacenter)
-            mano_response = requests.get(URLrequest)
-            logger.debug("openmano response: %s", mano_response.text )
-            content = mano_response.json()
-            if "error" not in content:
-                mano_datacenter_id = content["datacenter"]["uuid"]
-                mano_datacenter_name = content["datacenter"]["name"]
-
-            # WIM
-            URLrequest = "http://%s:%s/openmano/%s/wims/%s" % (
-            mano_host, mano_port, mano_tenant_id, mano_wim)
-            mano_response = requests.get(URLrequest)
-            logger.debug("openmano response: %s", mano_response.text)
-            content = mano_response.json()
-            if "error" not in content:
-                mano_wim_id = content["wim"]["uuid"]
-                mano_wim_name = content["wim"]["name"]
-
-        except OpenmanoCLIError:
-            pass
-        print "OPENMANO_TENANT: %s" %mano_tenant
-        print "    Id: %s" %mano_tenant_id
-        print "    Name: %s" %mano_tenant_name
-        print "OPENMANO_DATACENTER: %s" %str (mano_datacenter)
-        print "    Id: %s" %mano_datacenter_id
-        print "    Name: %s" %mano_datacenter_name
-        # WIM
-        print "OPENMANO_WIM: %s" %str (mano_wim)
-        print "    Id: %s" %mano_wim_id
-        print "    Name: %s" %mano_wim_name
-
-    else:
-        print "OPENMANO_TENANT: %s" %mano_tenant
-        print "OPENMANO_DATACENTER: %s" %str (mano_datacenter)
-        # WIM
-        print "OPENMANO_WIM: %s" %str (mano_wim)
-
-def _print_verbose(mano_response, verbose_level=0):
-    content = mano_response.json()
-    result = 0 if mano_response.status_code==200 else mano_response.status_code
-    if type(content)!=dict or len(content)!=1:
-        #print "Non expected format output"
-        print str(content)
-        return result
-
-    val=content.values()[0]
-    if type(val)==str:
-        print val
-        return result
-    elif type(val) == list:
-        content_list = val
-    elif type(val)==dict:
-        content_list = [val]
-    else:
-        #print "Non expected dict/list format output"
-        print str(content)
-        return result
-
-    #print content_list
-    if verbose_level==None:
-        verbose_level=0
-    if verbose_level >= 3:
-        print yaml.safe_dump(content, indent=4, default_flow_style=False)
-        return result
-
-    if mano_response.status_code == 200:
-        uuid = None
-        for content in content_list:
-            if "uuid" in content:
-                uuid = content['uuid']
-            elif "id" in content:
-                uuid = content['id']
-            elif "vim_id" in content:
-                uuid = content['vim_id']
-            name = content.get('name');
-            if not uuid:
-                uuid = ""
-            if not name:
-                name = ""
-            myoutput = "{:38} {:20}".format(uuid, name)
-            if content.get("status"):
-                myoutput += " {:20}".format(content['status'])
-            elif "enabled" in content and not content["enabled"]:
-                myoutput += " enabled=False".ljust(20)
-            if verbose_level >=1:
-                if content.get('created_at'):
-                    myoutput += " {:20}".format(content['created_at'])
-                if content.get('sdn_attached_ports'):
-                    #myoutput += " " + str(content['sdn_attached_ports']).ljust(20)
-                    myoutput += "\nsdn_attached_ports:\n" + yaml.safe_dump(content['sdn_attached_ports'], indent=4, default_flow_style=False)
-                if verbose_level >=2:
-                    new_line='\n'
-                    if content.get('type'):
-                        myoutput += new_line + "  Type: {:29}".format(content['type'])
-                        new_line=''
-                    if content.get('description'):
-                        myoutput += new_line + "  Description: {:20}".format(content['description'])
-            print myoutput
-    else:
-        print content['error']['description']
-    return result
-
-def parser_json_yaml(file_name):
-    try:
-        f = file(file_name, "r")
-        text = f.read()
-        f.close()
-    except Exception as e:
-        return (False, str(e))
-
-    #Read and parse file
-    if file_name[-5:]=='.yaml' or file_name[-4:]=='.yml' or (file_name[-5:]!='.json' and '\t' not in text):
-        try:
-            config = yaml.load(text)
-        except yaml.YAMLError as exc:
-            error_pos = ""
-            if hasattr(exc, 'problem_mark'):
-                mark = exc.problem_mark
-                error_pos = " at line:%s column:%s" % (mark.line+1, mark.column+1)
-            return (False, "Error loading file '"+file_name+"' yaml format error" + error_pos)
-    else: #json
-        try:
-            config = json.loads(text)
-        except Exception as e:
-            return (False, "Error loading file '"+file_name+"' json format error " + str(e) )
-
-    return True, config
-
-def _load_file_or_yaml(content):
-    '''
-    'content' can be or a yaml/json file or a text containing a yaml/json text format
-    This function autodetect, trying to load and parse the file,
-    if fails trying to parse the 'content' text
-    Returns the dictionary once parsed, or print an error and finish the program
-    '''
-    #Check config file exists
-    if os.path.isfile(content):
-        r,payload = parser_json_yaml(content)
-        if not r:
-            print payload
-            exit(-1)
-    elif "{" in content or ":" in content:
-        try:
-            payload = yaml.load(content)
-        except yaml.YAMLError as exc:
-            error_pos = ""
-            if hasattr(exc, 'problem_mark'):
-                mark = exc.problem_mark
-                error_pos = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
-            print "Error loading yaml/json text"+error_pos
-            exit (-1)
-    else:
-        print "'%s' is neither a valid file nor a yaml/json content" % content
-        exit(-1)
-    return payload
-
-def _get_item_uuid(item, item_name_id, tenant=None):
-    if tenant:
-        URLrequest = "http://%s:%s/openmano/%s/%s" %(mano_host, mano_port, tenant, item)
-    else:
-        URLrequest = "http://%s:%s/openmano/%s" %(mano_host, mano_port, item)
-    mano_response = requests.get(URLrequest)
-    logger.debug("openmano response: %s", mano_response.text )
-    content = mano_response.json()
-    #print content
-    found = 0
-    for i in content[item]:
-        if i["uuid"] == item_name_id:
-            return item_name_id
-        if i["name"] == item_name_id:
-            uuid = i["uuid"]
-            found += 1
-        if item_name_id.startswith("osm_id=") and i.get("osm_id") == item_name_id[7:]:
-            uuid = i["uuid"]
-            found += 1
-    if found == 0:
-        raise OpenmanoCLIError("No %s found with name/uuid '%s'" %(item[:-1], item_name_id))
-    elif found > 1:
-        raise OpenmanoCLIError("%d %s found with name '%s'. uuid must be used" %(found, item, item_name_id))
-    return uuid
-#
-# def check_valid_uuid(uuid):
-#     id_schema = {"type" : "string", "pattern": "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$"}
-#     try:
-#         js_v(uuid, id_schema)
-#         return True
-#     except js_e.ValidationError:
-#         return False
-
-def _get_tenant(tenant_name_id = None):
-    if not tenant_name_id:
-        tenant_name_id = mano_tenant
-        if not mano_tenant:
-            raise OpenmanoCLIError("'OPENMANO_TENANT' environment variable is not set")
-    return _get_item_uuid("tenants", tenant_name_id)
-
-def _get_datacenter(datacenter_name_id = None, tenant = "any"):
-    if not datacenter_name_id:
-        datacenter_name_id = mano_datacenter
-        if not datacenter_name_id:
-            raise OpenmanoCLIError("neither 'OPENMANO_DATACENTER' environment variable is set nor --datacenter option is used")
-    return _get_item_uuid("datacenters", datacenter_name_id, tenant)
-
-# WIM
-def _get_wim(wim_name_id = None, tenant = "any"):
-    if not wim_name_id:
-        wim_name_id = mano_wim
-        if not wim_name_id:
-            raise OpenmanoCLIError("neither 'OPENMANO_WIM' environment variable is set nor --wim option is used")
-    return _get_item_uuid("wims", wim_name_id, tenant)
-
-def vnf_create(args):
-    #print "vnf-create",args
-    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
-    tenant = _get_tenant()
-    myvnf = _load_file_or_yaml(args.file)
-    api_version = ""
-    if "vnfd:vnfd-catalog" in myvnf or "vnfd-catalog" in myvnf:
-        api_version = "/v3"
-        token = "vnfd"
-        vnfd_catalog = myvnf.get("vnfd:vnfd-catalog")
-        if not vnfd_catalog:
-            vnfd_catalog = myvnf.get("vnfd-catalog")
-        vnfds = vnfd_catalog.get("vnfd:vnfd")
-        if not vnfds:
-            vnfds = vnfd_catalog.get("vnfd")
-        vnfd = vnfds[0]
-        vdu_list = vnfd.get("vdu")
-
-    else:  # old API
-        api_version = ""
-        token = "vnfs"
-        vnfd = myvnf['vnf']
-        vdu_list = vnfd.get("VNFC")
-
-    if args.name or args.description or args.image_path or args.image_name or args.image_checksum:
-        # TODO, change this for API v3
-        # print args.name
-        try:
-            if args.name:
-                vnfd['name'] = args.name
-            if args.description:
-                vnfd['description'] = args.description
-            if vdu_list:
-                if args.image_path:
-                    index = 0
-                    for image_path_ in args.image_path.split(","):
-                        # print "image-path", image_path_
-                        if api_version == "/v3":
-                            if vdu_list[index].get("image"):
-                                vdu_list[index]['image'] = image_path_
-                                if "image-checksum" in vdu_list[index]:
-                                    del vdu_list[index]["image-checksum"]
-                            else:  # image name in volumes
-                                vdu_list[index]["volumes"][0]["image"] = image_path_
-                                if "image-checksum" in vdu_list[index]["volumes"][0]:
-                                    del vdu_list[index]["volumes"][0]["image-checksum"]
-                        else:
-                            vdu_list[index]['VNFC image'] = image_path_
-                            if "image name" in vdu_list[index]:
-                                del vdu_list[index]["image name"]
-                            if "image checksum" in vdu_list[index]:
-                                del vdu_list[index]["image checksum"]
-                        index += 1
-                if args.image_name:  # image name precedes if both are supplied
-                    index = 0
-                    for image_name_ in args.image_name.split(","):
-                        if api_version == "/v3":
-                            if vdu_list[index].get("image"):
-                                vdu_list[index]['image'] = image_name_
-                                if "image-checksum" in vdu_list[index]:
-                                    del vdu_list[index]["image-checksum"]
-                                if vdu_list[index].get("alternative-images"):
-                                    for a_image in vdu_list[index]["alternative-images"]:
-                                        a_image['image'] = image_name_
-                                        if "image-checksum" in a_image:
-                                            del a_image["image-checksum"]
-                            else:  # image name in volumes
-                                vdu_list[index]["volumes"][0]["image"] = image_name_
-                                if "image-checksum" in vdu_list[index]["volumes"][0]:
-                                    del vdu_list[index]["volumes"][0]["image-checksum"]
-                        else:
-                            vdu_list[index]['image name'] = image_name_
-                            if "VNFC image" in vdu_list[index]:
-                                del vdu_list[index]["VNFC image"]
-                        index += 1
-                if args.image_checksum:
-                    index = 0
-                    for image_checksum_ in args.image_checksum.split(","):
-                        if api_version == "/v3":
-                            if vdu_list[index].get("image"):
-                                vdu_list[index]['image-checksum'] = image_checksum_
-                                if vdu_list[index].get("alternative-images"):
-                                    for a_image in vdu_list[index]["alternative-images"]:
-                                        a_image['image-checksum'] = image_checksum_
-                            else:  # image name in volumes
-                                vdu_list[index]["volumes"][0]["image-checksum"] = image_checksum_
-                        else:
-                            vdu_list[index]['image checksum'] = image_checksum_
-                        index += 1
-        except (KeyError, TypeError), e:
-            if str(e) == 'vnf':           error_pos= "missing field 'vnf'"
-            elif str(e) == 'name':        error_pos= "missing field  'vnf':'name'"
-            elif str(e) == 'description': error_pos= "missing field  'vnf':'description'"
-            elif str(e) == 'VNFC':        error_pos= "missing field  'vnf':'VNFC'"
-            elif str(e) == str(index):    error_pos= "field  'vnf':'VNFC' must be an array"
-            elif str(e) == 'VNFC image':  error_pos= "missing field 'vnf':'VNFC'['VNFC image']"
-            elif str(e) == 'image name':  error_pos= "missing field 'vnf':'VNFC'['image name']"
-            elif str(e) == 'image checksum':  error_pos= "missing field 'vnf':'VNFC'['image checksum']"
-            else:                       error_pos="wrong format"
-            print "Wrong VNF descriptor: " + error_pos
-            return -1
-    payload_req = json.dumps(myvnf)
-
-    #print payload_req
-
-    URLrequest = "http://{}:{}/openmano{}/{}/{token}".format(mano_host, mano_port, api_version, tenant, token=token)
-    logger.debug("openmano request: %s", payload_req)
-    mano_response = requests.post(URLrequest, headers = headers_req, data=payload_req)
-    logger.debug("openmano response: %s", mano_response.text )
-
-    return _print_verbose(mano_response, args.verbose)
-
-def vnf_list(args):
-    #print "vnf-list",args
-    if args.all:
-        tenant = "any"
-    else:
-        tenant = _get_tenant()
-    if args.name:
-        toshow = _get_item_uuid("vnfs", args.name, tenant)
-        URLrequest = "http://%s:%s/openmano/%s/vnfs/%s" %(mano_host, mano_port, tenant, toshow)
-    else:
-        URLrequest = "http://%s:%s/openmano/%s/vnfs" %(mano_host, mano_port, tenant)
-    mano_response = requests.get(URLrequest)
-    logger.debug("openmano response: %s", mano_response.text )
-    content = mano_response.json()
-    # print json.dumps(content, indent=4)
-    if args.verbose==None:
-        args.verbose=0
-    result = 0 if mano_response.status_code==200 else mano_response.status_code
-    if mano_response.status_code == 200:
-        if not args.name:
-            if args.verbose >= 3:
-                print yaml.safe_dump(content, indent=4, default_flow_style=False)
-                return result
-            if len(content['vnfs']) == 0:
-                print "No VNFs were found."
-                return 404   # HTTP_Not_Found
-            for vnf in content['vnfs']:
-                myoutput = "{:38} {:20}".format(vnf['uuid'], vnf['name'])
-                if vnf.get('osm_id') or args.verbose >= 1:
-                    myoutput += " osm_id={:20}".format(vnf.get('osm_id'))
-                if args.verbose >= 1:
-                    myoutput += " {}".format(vnf['created_at'])
-                print (myoutput)
-                if args.verbose >= 2:
-                    print ("  Description: {}".format(vnf['description']))
-                    # print ("  VNF descriptor file: {}".format(vnf['path']))
-        else:
-            if args.verbose:
-                print yaml.safe_dump(content, indent=4, default_flow_style=False)
-                return result
-            vnf = content['vnf']
-            print ("{:38} {:20} osm_id={:20} {:20}".format(vnf['uuid'], vnf['name'], vnf.get('osm_id'),
-                                                           vnf['created_at']))
-            print ("  Description: {}".format(vnf['description']))
-            # print "  VNF descriptor file: %s" %vnf['path']
-            print ("  VMs:")
-            for vm in vnf['VNFC']:
-                print ("    {:20} osm_id={:20} {}".format(vm['name'], vm.get('osm_id'), vm['description']))
-            if len(vnf['nets']) > 0:
-                print ("  Internal nets:")
-                for net in vnf['nets']:
-                    print ("    {:20} {}".format(net['name'], net['description']))
-            if len(vnf['external-connections']) > 0:
-                print ("  External interfaces:")
-                for interface in vnf['external-connections']:
-                    print ("    {:20} {:20} {:20} {:14}".format(
-                        interface['external_name'], interface['vm_name'],
-                        interface['internal_name'],
-                        interface.get('vpci') if interface.get('vpci') else ""))
-    else:
-        print content['error']['description']
-        if args.verbose:
-            print yaml.safe_dump(content, indent=4, default_flow_style=False)
-    return result
-
-def vnf_delete(args):
-    #print "vnf-delete",args
-    if args.all:
-        tenant = "any"
-    else:
-        tenant = _get_tenant()
-    todelete = _get_item_uuid("vnfs", args.name, tenant=tenant)
-    if not args.force:
-        r = raw_input("Delete VNF %s (y/N)? " %(todelete))
-        if  not (len(r)>0  and r[0].lower()=="y"):
-            return 0
-    URLrequest = "http://%s:%s/openmano/%s/vnfs/%s" %(mano_host, mano_port, tenant, todelete)
-    mano_response = requests.delete(URLrequest)
-    logger.debug("openmano response: %s", mano_response.text )
-    result = 0 if mano_response.status_code==200 else mano_response.status_code
-    content = mano_response.json()
-    #print json.dumps(content, indent=4)
-    if mano_response.status_code == 200:
-        print content['result']
-    else:
-        print content['error']['description']
-    return result
-
-def scenario_create(args):
-    # print "scenario-create",args
-    tenant = _get_tenant()
-    headers_req = {'content-type': 'application/yaml'}
-    myscenario = _load_file_or_yaml(args.file)
-    if "nsd:nsd-catalog" in myscenario or "nsd-catalog" in myscenario:
-        api_version = "/v3"
-        token = "nsd"
-        nsd_catalog = myscenario.get("nsd:nsd-catalog")
-        if not nsd_catalog:
-            nsd_catalog = myscenario.get("nsd-catalog")
-        nsds = nsd_catalog.get("nsd:nsd")
-        if not nsds:
-            nsds = nsd_catalog.get("nsd")
-        nsd = nsds[0]
-    else:  # API<v3
-        api_version = ""
-        token = "scenarios"
-        if "scenario" in myscenario:
-            nsd = myscenario["scenario"]
-        else:
-            nsd = myscenario
-    # TODO modify for API v3
-    if args.name:
-        nsd['name'] = args.name
-    if args.description:
-        nsd['description'] = args.description
-    payload_req = yaml.safe_dump(myscenario, explicit_start=True, indent=4, default_flow_style=False, tags=False,
-                                 encoding='utf-8', allow_unicode=True)
-
-    # print payload_req
-    URLrequest = "http://{host}:{port}/openmano{api}/{tenant}/{token}".format(
-        host=mano_host, port=mano_port, api=api_version, tenant=tenant, token=token)
-    logger.debug("openmano request: %s", payload_req)
-    mano_response = requests.post(URLrequest, headers = headers_req, data=payload_req)
-    logger.debug("openmano response: %s", mano_response.text )
-    return _print_verbose(mano_response, args.verbose)
-
-def scenario_list(args):
-    #print "scenario-list",args
-    if args.all:
-        tenant = "any"
-    else:
-        tenant = _get_tenant()
-    if args.name:
-        toshow = _get_item_uuid("scenarios", args.name, tenant)
-        URLrequest = "http://%s:%s/openmano/%s/scenarios/%s" %(mano_host, mano_port, tenant, toshow)
-    else:
-        URLrequest = "http://%s:%s/openmano/%s/scenarios" %(mano_host, mano_port, tenant)
-    mano_response = requests.get(URLrequest)
-    logger.debug("openmano response: %s", mano_response.text )
-    content = mano_response.json()
-    #print json.dumps(content, indent=4)
-    if args.verbose==None:
-        args.verbose=0
-
-    result = 0 if mano_response.status_code==200 else mano_response.status_code
-    if mano_response.status_code == 200:
-        if not args.name:
-            if args.verbose >= 3:
-                print yaml.safe_dump(content, indent=4, default_flow_style=False)
-                return result
-            if len(content['scenarios']) == 0:
-                print "No scenarios were found."
-                return 404 #HTTP_Not_Found
-            for scenario in content['scenarios']:
-                myoutput = "{:38} {:20}".format(scenario['uuid'], scenario['name'])
-                if scenario.get('osm_id') or args.verbose >= 1:
-                    myoutput += " osm_id={:20}".format(scenario.get('osm_id'))
-                if args.verbose >= 1:
-                    myoutput += " {}".format(scenario['created_at'])
-                print (myoutput)
-                if args.verbose >=2:
-                    print ("  Description: {}".format(scenario['description']))
-        else:
-            if args.verbose:
-                print yaml.safe_dump(content, indent=4, default_flow_style=False)
-                return result
-            scenario = content['scenario']
-            print ("{:38} {:20} osm_id={:20} {:20}".format(scenario['uuid'], scenario['name'], scenario.get('osm_id'),
-                                                           scenario['created_at']))
-            print ("  Description: {}".format(scenario['description']))
-            print ("  VNFs:")
-            for vnf in scenario['vnfs']:
-                print ("    {:38} {:20} vnf_index={} {}".format(vnf['vnf_id'], vnf['name'], vnf.get("member_vnf_index"),
-                                                                vnf['description']))
-            if len(scenario['nets']) > 0:
-                print ("  nets:")
-                for net in scenario['nets']:
-                    description = net['description']
-                    if not description:   # if description does not exist, description is "-". Valid for external and internal nets.
-                        description = '-'
-                    vim_id = ""
-                    if net.get('vim_id'):
-                        vim_id = " vim_id=" + net["vim_id"]
-                    external = ""
-                    if net["external"]:
-                        external = " external"
-                    print ("    {:20} {:38} {:30}{}{}".format(net['name'], net['uuid'], description, vim_id, external))
-    else:
-        print (content['error']['description'])
-        if args.verbose:
-            print yaml.safe_dump(content, indent=4, default_flow_style=False)
-    return result
-
-def scenario_delete(args):
-    #print "scenario-delete",args
-    if args.all:
-        tenant = "any"
-    else:
-        tenant = _get_tenant()
-    todelete = _get_item_uuid("scenarios", args.name, tenant=tenant)
-    if not args.force:
-        r = raw_input("Delete scenario %s (y/N)? " %(args.name))
-        if  not (len(r)>0  and r[0].lower()=="y"):
-            return 0
-    URLrequest = "http://%s:%s/openmano/%s/scenarios/%s" %(mano_host, mano_port, tenant, todelete)
-    mano_response = requests.delete(URLrequest)
-    logger.debug("openmano response: %s", mano_response.text )
-    result = 0 if mano_response.status_code==200 else mano_response.status_code
-    content = mano_response.json()
-    #print json.dumps(content, indent=4)
-    if mano_response.status_code == 200:
-        print content['result']
-    else:
-        print content['error']['description']
-    return result
-
-def scenario_deploy(args):
-    print "This command is deprecated, use 'openmano instance-scenario-create --scenario %s --name %s' instead!!!" % (args.scenario, args.name)
-    print
-    args.file = None
-    args.netmap_use = None
-    args.netmap_create = None
-    args.keypair = None
-    args.keypair_auto = None
-    return instance_create(args)
-
-#     #print "scenario-deploy",args
-#     headers_req = {'content-type': 'application/json'}
-#     action = {}
-#     actionCmd="start"
-#     if args.nostart:
-#         actionCmd="reserve"
-#     action[actionCmd] = {}
-#     action[actionCmd]["instance_name"] = args.name
-#     if args.datacenter != None:
-#         action[actionCmd]["datacenter"] = args.datacenter
-#     elif mano_datacenter != None:
-#         action[actionCmd]["datacenter"] = mano_datacenter
-#
-#     if args.description:
-#         action[actionCmd]["description"] = args.description
-#     payload_req = json.dumps(action, indent=4)
-#     #print payload_req
-#
-#     URLrequest = "http://%s:%s/openmano/%s/scenarios/%s/action" %(mano_host, mano_port, mano_tenant, args.scenario)
-#     logger.debug("openmano request: %s", payload_req)
-#     mano_response = requests.post(URLrequest, headers = headers_req, data=payload_req)
-#     logger.debug("openmano response: %s", mano_response.text )
-#     if args.verbose==None:
-#         args.verbose=0
-#
-#     result = 0 if mano_response.status_code==200 else mano_response.status_code
-#     content = mano_response.json()
-#     #print json.dumps(content, indent=4)
-#     if args.verbose >= 3:
-#         print yaml.safe_dump(content, indent=4, default_flow_style=False)
-#         return result
-#
-#     if mano_response.status_code == 200:
-#         myoutput = "%s %s" %(content['uuid'].ljust(38),content['name'].ljust(20))
-#         if args.verbose >=1:
-#             myoutput = "%s %s" %(myoutput, content['created_at'].ljust(20))
-#         if args.verbose >=2:
-#             myoutput = "%s %s %s" %(myoutput, content['description'].ljust(30))
-#         print myoutput
-#         print ""
-#         print "To check the status, run the following command:"
-#         print "openmano instance-scenario-list <instance_id>"
-#     else:
-#         print content['error']['description']
-#     return result
-
-def scenario_verify(args):
-    #print "scenario-verify",args
-    tenant = _get_tenant()
-    headers_req = {'content-type': 'application/json'}
-    action = {}
-    action["verify"] = {}
-    action["verify"]["instance_name"] = "scen-verify-return5"
-    payload_req = json.dumps(action, indent=4)
-    #print payload_req
-
-    URLrequest = "http://%s:%s/openmano/%s/scenarios/%s/action" %(mano_host, mano_port, tenant, args.scenario)
-    logger.debug("openmano request: %s", payload_req)
-    mano_response = requests.post(URLrequest, headers = headers_req, data=payload_req)
-    logger.debug("openmano response: %s", mano_response.text )
-
-    result = 0 if mano_response.status_code==200 else mano_response.status_code
-    content = mano_response.json()
-    #print json.dumps(content, indent=4)
-    if mano_response.status_code == 200:
-        print content['result']
-    else:
-        print content['error']['description']
-    return result
-
-def instance_create(args):
-    tenant = _get_tenant()
-    headers_req = {'content-type': 'application/yaml'}
-    myInstance={"instance": {}, "schema_version": "0.1"}
-    if args.file:
-        instance_dict = _load_file_or_yaml(args.file)
-        if "instance" not in instance_dict:
-            myInstance = {"instance": instance_dict, "schema_version": "0.1"}
-        else:
-            myInstance = instance_dict
-    if args.name:
-        myInstance["instance"]['name'] = args.name
-    if args.description:
-        myInstance["instance"]['description'] = args.description
-    if args.nostart:
-        myInstance["instance"]['action'] = "reserve"
-    #datacenter
-    datacenter = myInstance["instance"].get("datacenter")
-    if args.datacenter != None:
-        datacenter = args.datacenter
-    myInstance["instance"]["datacenter"] = _get_datacenter(datacenter, tenant)
-    #scenario
-    scenario = myInstance["instance"].get("scenario")
-    if args.scenario != None:
-        scenario = args.scenario
-    if not scenario:
-        print "you must provide a scenario in the file descriptor or with --scenario"
-        return -1
-    if isinstance(scenario, str):
-        myInstance["instance"]["scenario"] = _get_item_uuid("scenarios", scenario, tenant)
-    if args.netmap_use:
-        if "networks" not in myInstance["instance"]:
-            myInstance["instance"]["networks"] = {}
-        for net in args.netmap_use:
-            net_comma_list = net.split(",")
-            for net_comma in net_comma_list:
-                net_tuple = net_comma.split("=")
-                if len(net_tuple) != 2:
-                    print "error at netmap-use. Expected net-scenario=net-datacenter. (%s)?" % net_comma
-                    return
-                net_scenario   = net_tuple[0].strip()
-                net_datacenter = net_tuple[1].strip()
-                if net_scenario not in myInstance["instance"]["networks"]:
-                    myInstance["instance"]["networks"][net_scenario] = {}
-                if "sites" not in myInstance["instance"]["networks"][net_scenario]:
-                    myInstance["instance"]["networks"][net_scenario]["sites"] = [ {} ]
-                myInstance["instance"]["networks"][net_scenario]["sites"][0]["netmap-use"] = net_datacenter
-    if args.netmap_create:
-        if "networks" not in myInstance["instance"]:
-            myInstance["instance"]["networks"] = {}
-        for net in args.netmap_create:
-            net_comma_list = net.split(",")
-            for net_comma in net_comma_list:
-                net_tuple = net_comma.split("=")
-                if len(net_tuple) == 1:
-                    net_scenario   = net_tuple[0].strip()
-                    net_datacenter = None
-                elif len(net_tuple) == 2:
-                    net_scenario   = net_tuple[0].strip()
-                    net_datacenter = net_tuple[1].strip()
-                else:
-                    print "error at netmap-create. Expected net-scenario=net-datacenter or net-scenario. (%s)?" % net_comma
-                    return
-                if net_scenario not in myInstance["instance"]["networks"]:
-                    myInstance["instance"]["networks"][net_scenario] = {}
-                if "sites" not in myInstance["instance"]["networks"][net_scenario]:
-                    myInstance["instance"]["networks"][net_scenario]["sites"] = [ {} ]
-                myInstance["instance"]["networks"][net_scenario]["sites"][0]["netmap-create"] = net_datacenter
-    if args.keypair:
-        if "cloud-config" not in myInstance["instance"]:
-            myInstance["instance"]["cloud-config"] = {}
-        cloud_config = myInstance["instance"]["cloud-config"]
-        for key in args.keypair:
-            index = key.find(":")
-            if index<0:
-                if "key-pairs" not in cloud_config:
-                    cloud_config["key-pairs"] = []
-                cloud_config["key-pairs"].append(key)
-            else:
-                user = key[:index]
-                key_ = key[index+1:]
-                key_list = key_.split(",")
-                if "users" not in cloud_config:
-                    cloud_config["users"] = []
-                cloud_config["users"].append({"name": user, "key-pairs": key_list  })
-    if args.keypair_auto:
-        try:
-            keys=[]
-            home = os.getenv("HOME")
-            user = os.getenv("USER")
-            files = os.listdir(home+'/.ssh')
-            for file in files:
-                if file[-4:] == ".pub":
-                    with open(home+'/.ssh/'+file, 'r') as f:
-                        keys.append(f.read())
-            if not keys:
-                print "Cannot obtain any public ssh key from '{}'. Try not using --keymap-auto".format(home+'/.ssh')
-                return 1
-        except Exception as e:
-            print "Cannot obtain any public ssh key. Error '{}'. Try not using --keymap-auto".format(str(e))
-            return 1
-
-        if "cloud-config" not in myInstance["instance"]:
-            myInstance["instance"]["cloud-config"] = {}
-        cloud_config = myInstance["instance"]["cloud-config"]
-        if "key-pairs" not in cloud_config:
-            cloud_config["key-pairs"] = []
-        if user:
-            if "users" not in cloud_config:
-                cloud_config["users"] = []
-            cloud_config["users"].append({"name": user, "key-pairs": keys })
-
-    payload_req = yaml.safe_dump(myInstance, explicit_start=True, indent=4, default_flow_style=False, tags=False, encoding='utf-8', allow_unicode=True)
-    logger.debug("openmano request: %s", payload_req)
-    URLrequest = "http://%s:%s/openmano/%s/instances" %(mano_host, mano_port, tenant)
-    mano_response = requests.post(URLrequest, headers = headers_req, data=payload_req)
-    logger.debug("openmano response: %s", mano_response.text )
-    if args.verbose==None:
-        args.verbose=0
-
-    result = 0 if mano_response.status_code==200 else mano_response.status_code
-    content = mano_response.json()
-    #print json.dumps(content, indent=4)
-    if args.verbose >= 3:
-        print yaml.safe_dump(content, indent=4, default_flow_style=False)
-        return result
-
-    if mano_response.status_code == 200:
-        myoutput = "{:38} {:20}".format(content['uuid'], content['name'])
-        if args.verbose >=1:
-            myoutput = "{} {:20}".format(myoutput, content['created_at'])
-        if args.verbose >=2:
-            myoutput = "{} {:30}".format(myoutput, content['description'])
-        print myoutput
-    else:
-        print content['error']['description']
-    return result
-
-def instance_scenario_list(args):
-    #print "instance-scenario-list",args
-    if args.all:
-        tenant = "any"
-    else:
-        tenant = _get_tenant()
-    if args.name:
-        toshow = _get_item_uuid("instances", args.name, tenant)
-        URLrequest = "http://%s:%s/openmano/%s/instances/%s" %(mano_host, mano_port, tenant, toshow)
-    else:
-        URLrequest = "http://%s:%s/openmano/%s/instances" %(mano_host, mano_port, tenant)
-    mano_response = requests.get(URLrequest)
-    logger.debug("openmano response: %s", mano_response.text )
-    content = mano_response.json()
-    #print json.dumps(content, indent=4)
-    if args.verbose==None:
-        args.verbose=0
-
-    result = 0 if mano_response.status_code==200 else mano_response.status_code
-    if mano_response.status_code == 200:
-        if not args.name:
-            if args.verbose >= 3:
-                print yaml.safe_dump(content, indent=4, default_flow_style=False)
-                return result
-            if len(content['instances']) == 0:
-                print "No scenario instances were found."
-                return result
-            for instance in content['instances']:
-                myoutput = "{:38} {:20}".format(instance['uuid'], instance['name'])
-                if args.verbose >=1:
-                    myoutput = "{} {:20}".format(myoutput, instance['created_at'])
-                print myoutput
-                if args.verbose >=2:
-                    print "Description: %s" %instance['description']
-        else:
-            if args.verbose:
-                print yaml.safe_dump(content, indent=4, default_flow_style=False)
-                return result
-            instance = content
-            print ("{:38} {:20} {:20}".format(instance['uuid'],instance['name'],instance['created_at']))
-            print ("Description: %s" %instance['description'])
-            print ("Template scenario id: {}".format(instance['scenario_id']))
-            print ("Template scenario name: {}".format(instance['scenario_name']))
-            print ("---------------------------------------")
-            print ("VNF instances: {}".format(len(instance['vnfs'])))
-            for vnf in instance['vnfs']:
-                #print "    %s %s Template vnf name: %s Template vnf id: %s" %(vnf['uuid'].ljust(38), vnf['name'].ljust(20), vnf['vnf_name'].ljust(20), vnf['vnf_id'].ljust(38))
-                print ("    {:38} {:20} Template vnf id: {:38}".format(vnf['uuid'], vnf['vnf_name'], vnf['vnf_id']))
-            if len(instance['nets'])>0:
-                print "---------------------------------------"
-                print "Internal nets:"
-                for net in instance['nets']:
-                    if net['created']:
-                        print ("    {:38} {:12} VIM ID: {}".format(net['uuid'], net['status'], net['vim_net_id']))
-                print "---------------------------------------"
-                print "External nets:"
-                for net in instance['nets']:
-                    if not net['created']:
-                        print ("    {:38} {:12} VIM ID: {}".format(net['uuid'], net['status'], net['vim_net_id']))
-            print ("---------------------------------------")
-            print ("VM instances:")
-            for vnf in instance['vnfs']:
-                for vm in vnf['vms']:
-                    print ("    {:38} {:20} {:20} {:12} VIM ID: {}".format(vm['uuid'], vnf['vnf_name'], vm['name'],
-                                                                           vm['status'], vm['vim_vm_id']))
-    else:
-        print content['error']['description']
-        if args.verbose:
-            print yaml.safe_dump(content, indent=4, default_flow_style=False)
-    return result
-
-def instance_scenario_status(args):
-    print "instance-scenario-status"
-    return 0
-
-def instance_scenario_delete(args):
-    if args.all:
-        tenant = "any"
-    else:
-        tenant = _get_tenant()
-    todelete = _get_item_uuid("instances", args.name, tenant=tenant)
-    #print "instance-scenario-delete",args
-    if not args.force:
-        r = raw_input("Delete scenario instance %s (y/N)? " %(args.name))
-        if  not (len(r)>0  and r[0].lower()=="y"):
-            return
-    URLrequest = "http://%s:%s/openmano/%s/instances/%s" %(mano_host, mano_port, tenant, todelete)
-    mano_response = requests.delete(URLrequest)
-    logger.debug("openmano response: %s", mano_response.text )
-    result = 0 if mano_response.status_code==200 else mano_response.status_code
-    content = mano_response.json()
-    #print json.dumps(content, indent=4)
-    if mano_response.status_code == 200:
-        print content['result']
-    else:
-        print content['error']['description']
-    return result
-
-def get_action(args):
-    if not args.all:
-        tenant = _get_tenant()
-    else:
-        tenant = "any"
-    if not args.instance:
-        instance_id = "any"
-    else:
-        instance_id =args.instance
-    action_id = ""
-    if args.id:
-        action_id = "/" + args.id
-    URLrequest = "http://{}:{}/openmano/{}/instances/{}/action{}".format(mano_host, mano_port, tenant, instance_id,
-                                                                         action_id)
-    mano_response = requests.get(URLrequest)
-    logger.debug("openmano response: %s", mano_response.text )
-    if args.verbose == None:
-        args.verbose = 0
-    if args.id != None:
-        args.verbose += 1
-    return _print_verbose(mano_response, args.verbose)
-
-def instance_scenario_action(args):
-    #print "instance-scenario-action", args
-    tenant = _get_tenant()
-    toact = _get_item_uuid("instances", args.name, tenant=tenant)
-    action={}
-    action[ args.action ] = yaml.safe_load(args.param)
-    if args.vnf:
-        action["vnfs"] = args.vnf
-    if args.vm:
-        action["vms"] = args.vm
-
-    headers_req = {'content-type': 'application/json'}
-    payload_req = json.dumps(action, indent=4)
-    URLrequest = "http://%s:%s/openmano/%s/instances/%s/action" %(mano_host, mano_port, tenant, toact)
-    logger.debug("openmano request: %s", payload_req)
-    mano_response = requests.post(URLrequest, headers = headers_req, data=payload_req)
-    logger.debug("openmano response: %s", mano_response.text )
-    result = 0 if mano_response.status_code==200 else mano_response.status_code
-    content = mano_response.json()
-    # print json.dumps(content, indent=4)
-    if mano_response.status_code == 200:
-        if args.verbose:
-            print yaml.safe_dump(content, indent=4, default_flow_style=False)
-            return result
-        if "instance_action_id" in content:
-            print("instance_action_id={}".format(content["instance_action_id"]))
-        else:
-            for uuid,c in content.iteritems():
-                print ("{:38} {:20} {:20}".format(uuid, c.get('name'), c.get('description')))
-    else:
-        print content['error']['description']
-    return result
-
-
-def instance_vnf_list(args):
-    print "instance-vnf-list"
-    return 0
-
-def instance_vnf_status(args):
-    print "instance-vnf-status"
-    return 0
-
-def tenant_create(args):
-    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
-    tenant_dict={"name": args.name}
-    if args.description!=None:
-        tenant_dict["description"] = args.description
-    payload_req = json.dumps( {"tenant": tenant_dict })
-
-    #print payload_req
-
-    URLrequest = "http://%s:%s/openmano/tenants" %(mano_host, mano_port)
-    logger.debug("openmano request: %s", payload_req)
-    mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
-    logger.debug("openmano response: %s", mano_response.text )
-    return _print_verbose(mano_response, args.verbose)
-
-def tenant_list(args):
-    #print "tenant-list",args
-    if args.name:
-        toshow = _get_item_uuid("tenants", args.name)
-        URLrequest = "http://%s:%s/openmano/tenants/%s" %(mano_host, mano_port, toshow)
-    else:
-        URLrequest = "http://%s:%s/openmano/tenants" %(mano_host, mano_port)
-    mano_response = requests.get(URLrequest)
-    logger.debug("openmano response: %s", mano_response.text )
-    if args.verbose==None:
-        args.verbose=0
-    if args.name!=None:
-        args.verbose += 1
-    return _print_verbose(mano_response, args.verbose)
-
-def tenant_delete(args):
-    #print "tenant-delete",args
-    todelete = _get_item_uuid("tenants", args.name)
-    if not args.force:
-        r = raw_input("Delete tenant %s (y/N)? " %(args.name))
-        if  not (len(r)>0  and r[0].lower()=="y"):
-            return 0
-    URLrequest = "http://%s:%s/openmano/tenants/%s" %(mano_host, mano_port, todelete)
-    mano_response = requests.delete(URLrequest)
-    logger.debug("openmano response: %s", mano_response.text )
-    result = 0 if mano_response.status_code==200 else mano_response.status_code
-    content = mano_response.json()
-    #print json.dumps(content, indent=4)
-    if mano_response.status_code == 200:
-        print content['result']
-    else:
-        print content['error']['description']
-    return result
-
-def datacenter_attach(args):
-    tenant = _get_tenant()
-    datacenter = _get_datacenter(args.name)
-    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
-
-    datacenter_dict={}
-    if args.vim_tenant_id != None:
-        datacenter_dict['vim_tenant'] = args.vim_tenant_id
-    if args.vim_tenant_name != None:
-        datacenter_dict['vim_tenant_name'] = args.vim_tenant_name
-    if args.user != None:
-        datacenter_dict['vim_username'] = args.user
-    if args.password != None:
-        datacenter_dict['vim_password'] = args.password
-    if args.config!=None:
-        datacenter_dict["config"] = _load_file_or_yaml(args.config)
-
-    payload_req = json.dumps( {"datacenter": datacenter_dict })
-
-    #print payload_req
-
-    URLrequest = "http://%s:%s/openmano/%s/datacenters/%s" %(mano_host, mano_port, tenant, datacenter)
-    logger.debug("openmano request: %s", payload_req)
-    mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
-    logger.debug("openmano response: %s", mano_response.text )
-    result = _print_verbose(mano_response, args.verbose)
-    #provide addional information if error
-    if mano_response.status_code != 200:
-        content = mano_response.json()
-        if "already in use for  'name'" in content['error']['description'] and \
-                "to database vim_tenants table" in content['error']['description']:
-            print "Try to specify a different name with --vim-tenant-name"
-    return result
-
-
def datacenter_edit_vim_tenant(args):
    """Update the VIM credentials/config of a datacenter already attached to the tenant.

    Raises OpenmanoCLIError when no field to update was supplied.
    Returns the result of _print_verbose.
    """
    tenant = _get_tenant()
    datacenter = _get_datacenter(args.name)
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}

    if not (args.vim_tenant_id or args.vim_tenant_name or args.user or args.password or args.config):
        raise OpenmanoCLIError("Error. At least one parameter must be updated.")

    datacenter_dict = {}
    if args.vim_tenant_id is not None:
        datacenter_dict['vim_tenant'] = args.vim_tenant_id
    if args.vim_tenant_name is not None:
        datacenter_dict['vim_tenant_name'] = args.vim_tenant_name
    if args.user is not None:
        datacenter_dict['vim_username'] = args.user
    if args.password is not None:
        datacenter_dict['vim_password'] = args.password
    if args.config is not None:
        datacenter_dict["config"] = _load_file_or_yaml(args.config)
    payload_req = json.dumps({"datacenter": datacenter_dict})

    URLrequest = "http://%s:%s/openmano/%s/datacenters/%s" % (mano_host, mano_port, tenant, datacenter)
    logger.debug("openmano request: %s", payload_req)
    mano_response = requests.put(URLrequest, headers=headers_req, data=payload_req)
    logger.debug("openmano response: %s", mano_response.text)
    result = _print_verbose(mano_response, args.verbose)

    return result
-
def datacenter_detach(args):
    """Detach a datacenter from the current tenant (or from any tenant with --all).

    Returns 0 on success, otherwise the HTTP status code.
    """
    if args.all:
        tenant = "any"
    else:
        tenant = _get_tenant()
    datacenter = _get_datacenter(args.name, tenant)
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
    URLrequest = "http://%s:%s/openmano/%s/datacenters/%s" % (mano_host, mano_port, tenant, datacenter)
    mano_response = requests.delete(URLrequest, headers=headers_req)
    logger.debug("openmano response: %s", mano_response.text)
    content = mano_response.json()
    result = 0 if mano_response.status_code == 200 else mano_response.status_code
    if mano_response.status_code == 200:
        print(content['result'])
    else:
        print(content['error']['description'])
    return result
-
def datacenter_create(args):
    """Create a new datacenter entry at the openmano server.

    Returns the result of _print_verbose.
    """
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
    datacenter_dict = {"name": args.name, "vim_url": args.url}
    if args.description is not None:
        datacenter_dict["description"] = args.description
    if args.type is not None:
        datacenter_dict["type"] = args.type
    # BUGFIX: the guard previously tested args.url while assigning args.url_admin,
    # so --url-admin alone was silently ignored
    if args.url_admin is not None:
        datacenter_dict["vim_url_admin"] = args.url_admin
    if args.config is not None:
        datacenter_dict["config"] = _load_file_or_yaml(args.config)
    if args.sdn_controller is not None:
        tenant = _get_tenant()
        sdn_controller = _get_item_uuid("sdn_controllers", args.sdn_controller, tenant)
        if 'config' not in datacenter_dict:
            datacenter_dict['config'] = {}
        datacenter_dict['config']['sdn-controller'] = sdn_controller
    payload_req = json.dumps({"datacenter": datacenter_dict})

    URLrequest = "http://%s:%s/openmano/datacenters" % (mano_host, mano_port)
    logger.debug("openmano request: %s", payload_req)
    mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
    logger.debug("openmano response: %s", mano_response.text)
    return _print_verbose(mano_response, args.verbose)
-
def datacenter_delete(args):
    """Delete a datacenter, asking for confirmation unless --force is given.

    Returns 0 on success (or user abort), otherwise the HTTP status code.
    """
    todelete = _get_item_uuid("datacenters", args.name, "any")
    if not args.force:
        r = input("Delete datacenter %s (y/N)? " % (args.name))
        if not (len(r) > 0 and r[0].lower() == "y"):
            return 0
    URLrequest = "http://%s:%s/openmano/datacenters/%s" % (mano_host, mano_port, todelete)
    mano_response = requests.delete(URLrequest)
    logger.debug("openmano response: %s", mano_response.text)
    result = 0 if mano_response.status_code == 200 else mano_response.status_code
    content = mano_response.json()
    if mano_response.status_code == 200:
        print(content['result'])
    else:
        print(content['error']['description'])
    return result
-
-
def datacenter_list(args):
    """List datacenters of the current tenant (or all tenants with --all).

    A name increases verbosity by one so a single item is shown in detail.
    """
    tenant = 'any' if args.all else _get_tenant()

    if args.name:
        toshow = _get_item_uuid("datacenters", args.name, tenant)
        URLrequest = "http://%s:%s/openmano/%s/datacenters/%s" % (mano_host, mano_port, tenant, toshow)
    else:
        URLrequest = "http://%s:%s/openmano/%s/datacenters" % (mano_host, mano_port, tenant)
    mano_response = requests.get(URLrequest)
    logger.debug("openmano response: %s", mano_response.text)
    if args.verbose is None:
        args.verbose = 0
    if args.name is not None:
        args.verbose += 1
    return _print_verbose(mano_response, args.verbose)
-
-
def datacenter_sdn_port_mapping_set(args):
    """Set the SDN port mapping of a datacenter from a yaml/json file.

    Reads the current mapping first; if one exists, asks for confirmation
    (unless --force) and clears it before posting the new one.
    """
    tenant = _get_tenant()
    datacenter = _get_datacenter(args.name, tenant)
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}

    if not args.file:
        raise OpenmanoCLIError(
            "No yaml/json has been provided specifying the SDN port mapping")
    sdn_port_mapping = _load_file_or_yaml(args.file)
    payload_req = json.dumps({"sdn_port_mapping": sdn_port_mapping})

    # read the current mapping
    URLrequest = "http://%s:%s/openmano/%s/datacenters/%s/sdn_mapping" % (mano_host, mano_port, tenant, datacenter)
    mano_response = requests.get(URLrequest)
    logger.debug("openmano response: %s", mano_response.text)
    port_mapping = mano_response.json()
    if mano_response.status_code != 200:
        raise OpenmanoCLIError("openmano client error: {}".format(port_mapping['error']['description']))
    if len(port_mapping["sdn_port_mapping"]["ports_mapping"]) > 0:
        if not args.force:
            r = input("Datacenter %s already contains a port mapping. Overwrite? (y/N)? " % (datacenter))
            if not (len(r) > 0 and r[0].lower() == "y"):
                return 0

        # clear the existing mapping
        URLrequest = "http://%s:%s/openmano/%s/datacenters/%s/sdn_mapping" % (mano_host, mano_port, tenant, datacenter)
        mano_response = requests.delete(URLrequest)
        logger.debug("openmano response: %s", mano_response.text)
        if mano_response.status_code != 200:
            return _print_verbose(mano_response, args.verbose)

    # set the new mapping
    URLrequest = "http://%s:%s/openmano/%s/datacenters/%s/sdn_mapping" % (mano_host, mano_port, tenant, datacenter)
    logger.debug("openmano request: %s", payload_req)
    mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
    logger.debug("openmano response: %s", mano_response.text)
    return _print_verbose(mano_response, args.verbose)
-
-
def datacenter_sdn_port_mapping_list(args):
    """Show the SDN port mapping configured for the given datacenter."""
    tenant = _get_tenant()
    datacenter = _get_datacenter(args.name, tenant)

    url = "http://%s:%s/openmano/%s/datacenters/%s/sdn_mapping" % (mano_host, mano_port, tenant, datacenter)
    response = requests.get(url)
    logger.debug("openmano response: %s", response.text)
    # always print at maximum verbosity: the mapping is only useful in full detail
    return _print_verbose(response, 4)
-
-
def datacenter_sdn_port_mapping_clear(args):
    """Remove the SDN port mapping of a datacenter, asking for confirmation unless --force."""
    tenant = _get_tenant()
    datacenter = _get_datacenter(args.name, tenant)

    if not args.force:
        r = input("Clean SDN port mapping for datacenter %s (y/N)? " % (datacenter))
        if not (len(r) > 0 and r[0].lower() == "y"):
            return 0

    URLrequest = "http://%s:%s/openmano/%s/datacenters/%s/sdn_mapping" % (mano_host, mano_port, tenant, datacenter)
    mano_response = requests.delete(URLrequest)
    logger.debug("openmano response: %s", mano_response.text)

    return _print_verbose(mano_response, args.verbose)
-
-
def sdn_controller_create(args):
    """Register a new SDN controller for the current tenant.

    Raises OpenmanoCLIError when a mandatory argument is missing.
    Returns the result of _print_verbose.
    """
    tenant = _get_tenant()
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}

    # ip, port, dpid and type are mandatory; report all missing ones at once
    error_msg = []
    if not args.ip:
        error_msg.append("'ip'")
    if not args.port:
        error_msg.append("'port'")
    if not args.dpid:
        error_msg.append("'dpid'")
    if not args.type:
        error_msg.append("'type'")
    if error_msg:
        raise OpenmanoCLIError("The following arguments are required: " + ",".join(error_msg))

    controller_dict = {
        'name': args.name,
        'ip': args.ip,
        'port': int(args.port),
        'dpid': args.dpid,
        'type': args.type,
    }
    if args.description is not None:
        controller_dict['description'] = args.description
    if args.user is not None:
        controller_dict['user'] = args.user
    if args.password is not None:
        controller_dict['password'] = args.password

    payload_req = json.dumps({"sdn_controller": controller_dict})

    URLrequest = "http://%s:%s/openmano/%s/sdn_controllers" % (mano_host, mano_port, tenant)
    logger.debug("openmano request: %s", payload_req)
    mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
    logger.debug("openmano response: %s", mano_response.text)
    result = _print_verbose(mano_response, args.verbose)
    return result
-
-
def sdn_controller_edit(args):
    """Modify an existing SDN controller; asks for confirmation unless --force.

    Raises OpenmanoCLIError when no field to edit was supplied.
    """
    tenant = _get_tenant()
    controller_uuid = _get_item_uuid("sdn_controllers", args.name, tenant)
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}

    controller_dict = {}
    if args.new_name:
        controller_dict['name'] = args.new_name
    if args.ip:
        controller_dict['ip'] = args.ip
    if args.port:
        controller_dict['port'] = int(args.port)
    if args.dpid:
        controller_dict['dpid'] = args.dpid
    if args.type:
        controller_dict['type'] = args.type
    if args.description:
        controller_dict['description'] = args.description
    if args.user:
        controller_dict['user'] = args.user
    if args.password:
        controller_dict['password'] = args.password

    if not controller_dict:
        raise OpenmanoCLIError("At least one parameter must be edited")

    if not args.force:
        r = input("Update SDN controller {} (y/N)? ".format(args.name))
        if not (len(r) > 0 and r[0].lower() == "y"):
            return 0

    payload_req = json.dumps({"sdn_controller": controller_dict})

    URLrequest = "http://%s:%s/openmano/%s/sdn_controllers/%s" % (mano_host, mano_port, tenant, controller_uuid)
    logger.debug("openmano request: %s", payload_req)
    mano_response = requests.put(URLrequest, headers=headers_req, data=payload_req)
    logger.debug("openmano response: %s", mano_response.text)
    result = _print_verbose(mano_response, args.verbose)
    return result
-
-
def sdn_controller_list(args):
    """List SDN controllers of the current tenant; a name shows a single one in detail."""
    tenant = _get_tenant()
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}

    if args.name:
        toshow = _get_item_uuid("sdn_controllers", args.name, tenant)
        URLrequest = "http://%s:%s/openmano/%s/sdn_controllers/%s" % (mano_host, mano_port, tenant, toshow)
    else:
        URLrequest = "http://%s:%s/openmano/%s/sdn_controllers" % (mano_host, mano_port, tenant)
    mano_response = requests.get(URLrequest)
    logger.debug("openmano response: %s", mano_response.text)
    if args.verbose is None:
        args.verbose = 0
    if args.name is not None:
        args.verbose += 1

    return _print_verbose(mano_response, args.verbose)
-
-
def sdn_controller_delete(args):
    """Delete an SDN controller, asking for confirmation unless --force."""
    tenant = _get_tenant()
    controller_uuid = _get_item_uuid("sdn_controllers", args.name, tenant)

    if not args.force:
        r = input("Delete SDN controller %s (y/N)? " % (args.name))
        if not (len(r) > 0 and r[0].lower() == "y"):
            return 0

    URLrequest = "http://%s:%s/openmano/%s/sdn_controllers/%s" % (mano_host, mano_port, tenant, controller_uuid)
    mano_response = requests.delete(URLrequest)
    logger.debug("openmano response: %s", mano_response.text)
    return _print_verbose(mano_response, args.verbose)
-
def vim_action(args):
    """List, delete or create items (e.g. networks) directly at the VIM of a datacenter.

    args.action selects the operation; args.item is the VIM item kind.
    Returns the result of _print_verbose (or the HTTP status code on delete error).
    """
    tenant = _get_tenant()
    datacenter = _get_datacenter(args.datacenter, tenant)
    if args.verbose is None:
        args.verbose = 0
    if args.action == "list":
        URLrequest = "http://%s:%s/openmano/%s/vim/%s/%ss" % (mano_host, mano_port, tenant, datacenter, args.item)
        if args.name is not None:
            args.verbose += 1
            URLrequest += "/" + args.name
        mano_response = requests.get(URLrequest)
        logger.debug("openmano response: %s", mano_response.text)
        return _print_verbose(mano_response, args.verbose)
    elif args.action == "delete":
        URLrequest = "http://%s:%s/openmano/%s/vim/%s/%ss/%s" % (mano_host, mano_port, tenant, datacenter, args.item, args.name)
        mano_response = requests.delete(URLrequest)
        logger.debug("openmano response: %s", mano_response.text)
        result = 0 if mano_response.status_code == 200 else mano_response.status_code
        content = mano_response.json()
        if mano_response.status_code == 200:
            print(content['result'])
        else:
            print(content['error']['description'])
        return result
    elif args.action == "create":
        headers_req = {'content-type': 'application/yaml'}
        if args.file:
            create_dict = _load_file_or_yaml(args.file)
            if args.item not in create_dict:
                create_dict = {args.item: create_dict}
        else:
            create_dict = {args.item: {}}
        if args.name:
            create_dict[args.item]['name'] = args.name
        if args.item == "network":
            if args.bind_net:
                create_dict[args.item]['bind_net'] = args.bind_net
            if args.type:
                create_dict[args.item]['type'] = args.type
            if args.shared:
                create_dict[args.item]['shared'] = args.shared
        if "name" not in create_dict[args.item]:
            print("You must provide a name in the descriptor file or with the --name option")
            return
        payload_req = yaml.safe_dump(create_dict, explicit_start=True, indent=4, default_flow_style=False,
                                     tags=False, encoding='utf-8', allow_unicode=True)
        logger.debug("openmano request: %s", payload_req)
        URLrequest = "http://%s:%s/openmano/%s/vim/%s/%ss" % (mano_host, mano_port, tenant, datacenter, args.item)
        mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
        logger.debug("openmano response: %s", mano_response.text)
        if args.verbose is None:
            args.verbose = 0
        return _print_verbose(mano_response, args.verbose)
-
-
def _get_items(item, item_name_id=None, datacenter=None, tenant=None):
    """GET an item collection (or a single item) from the openmano server.

    Builds /openmano[/<tenant>][/vim/<datacenter>][/<item>s][/<item_name_id>]
    and returns the raw requests response.
    """
    url = "http://%s:%s/openmano" % (mano_host, mano_port)
    if tenant:
        url += "/" + tenant
    if datacenter:
        url += "/vim/" + datacenter
    if item:
        url += "/%ss" % item
    if item_name_id:
        url += "/" + item_name_id
    response = requests.get(url)
    logger.debug("openmano response: %s", response.text)
    return response
-
-
def vim_net_sdn_attach(args):
    """Attach a dataplane port to the SDN network associated with a VIM network.

    Verifies the network exists (and is unique) in the VIM before attaching.
    Raises OpenmanoCLIError on ambiguous name or server error.
    """
    # Verify the network exists in the vim
    tenant = _get_tenant()
    datacenter = _get_datacenter(args.datacenter, tenant)
    result = _get_items('network', item_name_id=args.vim_net, datacenter=datacenter, tenant=tenant)
    # safe_load: the response is data, never python objects (yaml.load is unsafe/deprecated)
    content = yaml.safe_load(result.content)
    if 'networks' in content:
        raise OpenmanoCLIError('More than one network in the vim named ' + args.vim_net + '. Use uuid instead')
    if 'error' in content:
        raise OpenmanoCLIError(yaml.safe_dump(content))
    network_uuid = content['network']['id']

    # Make call to attach the dataplane port to the SDN network associated to the vim network
    headers_req = {'content-type': 'application/yaml'}
    payload_req = {'port': args.port}
    if args.vlan:
        payload_req['vlan'] = int(args.vlan)
    if args.mac:
        payload_req['mac'] = args.mac

    URLrequest = "http://%s:%s/openmano/%s/vim/%s/network/%s/attach" % (mano_host, mano_port, tenant, datacenter, network_uuid)
    logger.debug("openmano request: %s", payload_req)
    mano_response = requests.post(URLrequest, headers=headers_req, data=json.dumps(payload_req))
    logger.debug("openmano response: %s", mano_response.text)
    result = _print_verbose(mano_response, args.verbose)
    return result
-
-
def vim_net_sdn_detach(args):
    """Detach one port (--id) or all ports (--all) from the SDN network of a VIM network.

    Asks for confirmation unless --force. Returns 1 on bad arguments,
    0 on user abort, else the result of _print_verbose.
    """
    if not args.all and not args.id:
        print("--all or --id must be used")
        return 1

    # Verify the network exists in the vim
    tenant = _get_tenant()
    datacenter = _get_datacenter(args.datacenter, tenant)
    result = _get_items('network', item_name_id=args.vim_net, datacenter=datacenter, tenant=tenant)
    # safe_load: the response is data, never python objects (yaml.load is unsafe/deprecated)
    content = yaml.safe_load(result.content)
    if 'networks' in content:
        raise OpenmanoCLIError('More than one network in the vim named ' + args.vim_net + '. Use uuid instead')
    if 'error' in content:
        raise OpenmanoCLIError(yaml.safe_dump(content))
    network_uuid = content['network']['id']

    if not args.force:
        r = input("Confirm action' (y/N)? ")
        if len(r) == 0 or r[0].lower() != "y":
            return 0

    if args.id:
        URLrequest = "http://%s:%s/openmano/%s/vim/%s/network/%s/detach/%s" % (
            mano_host, mano_port, tenant, datacenter, network_uuid, args.id)
    else:
        URLrequest = "http://%s:%s/openmano/%s/vim/%s/network/%s/detach" % (
            mano_host, mano_port, tenant, datacenter, network_uuid)
    mano_response = requests.delete(URLrequest)
    logger.debug("openmano response: %s", mano_response.text)
    result = _print_verbose(mano_response, args.verbose)
    return result
-
-
def datacenter_net_action(args):
    """Deprecated wrapper: translate the old 'net-*' commands into 'netmap-*' ones.

    'net-update' becomes a netmap-delete --all followed by a netmap-import.
    """
    if args.action == "net-update":
        print("This command is deprecated, use 'openmano datacenter-netmap-delete --all' and 'openmano datacenter-netmap-import' instead!!!")
        print()
        args.action = "netmap-delete"
        args.netmap = None
        args.all = True
        r = datacenter_netmap_action(args)
        if r == 0:
            args.force = True
            args.action = "netmap-import"
            r = datacenter_netmap_action(args)
        return r

    if args.action == "net-edit":
        args.netmap = args.net
        args.name = None
    elif args.action == "net-list":
        args.netmap = None
    elif args.action == "net-delete":
        args.netmap = args.net
        args.all = False

    # "net-xxx" -> "netmap-xxx"
    args.action = "netmap" + args.action[3:]
    args.vim_name = None
    args.vim_id = None
    print("This command is deprecated, use 'openmano datacenter-%s' instead!!!" % args.action)
    print()
    return datacenter_netmap_action(args)
-
def datacenter_netmap_action(args):
    """Handle the datacenter netmap sub-commands: list, delete, import, edit, create.

    Returns 1 on argument errors, 0 on user abort, otherwise the result
    of _print_verbose on the server response.
    """
    tenant = _get_tenant()
    datacenter = _get_datacenter(args.datacenter, tenant)
    payload_req = None
    if args.verbose is None:
        args.verbose = 0
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
    URLrequest = "http://%s:%s/openmano/%s/datacenters/%s/netmaps" % (mano_host, mano_port, tenant, datacenter)

    if args.action == "netmap-list":
        if args.netmap:
            URLrequest += "/" + args.netmap
            args.verbose += 1
        mano_response = requests.get(URLrequest)

    elif args.action == "netmap-delete":
        if args.netmap and args.all:
            print("you can not use a netmap name and the option --all at the same time")
            return 1
        if args.netmap:
            force_text = "Delete default netmap '%s' from datacenter '%s' (y/N)? " % (args.netmap, datacenter)
            URLrequest += "/" + args.netmap
        elif args.all:
            force_text = "Delete all default netmaps from datacenter '%s' (y/N)? " % (datacenter)
        else:
            print("you must specify a netmap name or the option --all")
            return 1
        if not args.force:
            r = input(force_text)
            if not (len(r) > 0 and r[0].lower() == "y"):
                return 0
        mano_response = requests.delete(URLrequest, headers=headers_req)
    elif args.action == "netmap-import":
        if not args.force:
            r = input("Create all the available networks from datacenter '%s' as default netmaps (y/N)? " % (datacenter))
            if not (len(r) > 0 and r[0].lower() == "y"):
                return 0
        URLrequest += "/upload"
        mano_response = requests.post(URLrequest, headers=headers_req)
    elif args.action == "netmap-edit" or args.action == "netmap-create":
        if args.file:
            payload = _load_file_or_yaml(args.file)
        else:
            payload = {}
        if "netmap" not in payload:
            payload = {"netmap": payload}
        if args.name:
            payload["netmap"]["name"] = args.name
        if args.vim_id:
            payload["netmap"]["vim_id"] = args.vim_id
        if args.action == "netmap-create" and args.vim_name:
            payload["netmap"]["vim_name"] = args.vim_name
        payload_req = json.dumps(payload)
        logger.debug("openmano request: %s", payload_req)

        # NOTE(review): 'netmap-edit' with --force falls into the create (POST)
        # branch below; behavior kept as-is — confirm this is intended
        if args.action == "netmap-edit" and not args.force:
            if len(payload["netmap"]) == 0:
                print("You must supply some parameter to edit")
                return 1
            r = input("Edit default netmap '%s' from datacenter '%s' (y/N)? " % (args.netmap, datacenter))
            if not (len(r) > 0 and r[0].lower() == "y"):
                return 0
            URLrequest += "/" + args.netmap
            mano_response = requests.put(URLrequest, headers=headers_req, data=payload_req)
        else:  # netmap-create
            if "vim_name" not in payload["netmap"] and "vim_id" not in payload["netmap"]:
                print("You must supply either --vim-id or --vim-name option; or include one of them in the file descriptor")
                return 1
            mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)

    logger.debug("openmano response: %s", mano_response.text)
    return _print_verbose(mano_response, args.verbose)
-
-
def element_edit(args):
    """Edit a generic element (args.element is the plural item kind) from a file.

    Asks for confirmation unless --force. Returns the result of _print_verbose.
    """
    element = _get_item_uuid(args.element, args.name)
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
    URLrequest = "http://%s:%s/openmano/%s/%s" % (mano_host, mano_port, args.element, element)
    payload = _load_file_or_yaml(args.file)
    # wrap under the singular key (e.g. "tenants" -> "tenant") if not already wrapped
    if args.element[:-1] not in payload:
        payload = {args.element[:-1]: payload}
    payload_req = json.dumps(payload)

    # NOTE(review): 'args.filer' looks like a typo for 'args.file' — kept as-is; confirm
    if not args.force or (args.name is None and args.filer is None):
        r = input(" Edit " + args.element[:-1] + " " + args.name + " (y/N)? ")
        if not (len(r) > 0 and r[0].lower() == "y"):
            return 0
    logger.debug("openmano request: %s", payload_req)
    mano_response = requests.put(URLrequest, headers=headers_req, data=payload_req)
    logger.debug("openmano response: %s", mano_response.text)
    if args.verbose is None:
        args.verbose = 0
    if args.name is not None:
        args.verbose += 1
    return _print_verbose(mano_response, args.verbose)
-
-
def datacenter_edit(args):
    """Edit a datacenter from a file and/or set/clear its SDN controller.

    Passing --sdn-controller null clears the association.
    Raises OpenmanoCLIError when nothing to modify was supplied.
    """
    tenant = _get_tenant()
    element = _get_item_uuid('datacenters', args.name, tenant)
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
    URLrequest = "http://%s:%s/openmano/datacenters/%s" % (mano_host, mano_port, element)

    has_arguments = False
    if args.file is not None:
        has_arguments = True
        payload = _load_file_or_yaml(args.file)
    else:
        payload = {}

    if args.sdn_controller is not None:
        has_arguments = True
        if 'config' not in payload:
            payload['config'] = {}
        if 'sdn-controller' not in payload['config']:
            payload['config']['sdn-controller'] = {}
        if args.sdn_controller == 'null':
            # the literal string 'null' clears the sdn-controller association
            payload['config']['sdn-controller'] = None
        else:
            payload['config']['sdn-controller'] = _get_item_uuid("sdn_controllers", args.sdn_controller, tenant)

    if not has_arguments:
        raise OpenmanoCLIError("At least one argument must be provided to modify the datacenter")

    if 'datacenter' not in payload:
        payload = {'datacenter': payload}
    payload_req = json.dumps(payload)

    # NOTE(review): 'args.filer' looks like a typo for 'args.file' — kept as-is; confirm
    if not args.force or (args.name is None and args.filer is None):
        r = input(" Edit datacenter " + args.name + " (y/N)? ")
        if not (len(r) > 0 and r[0].lower() == "y"):
            return 0
    logger.debug("openmano request: %s", payload_req)
    mano_response = requests.put(URLrequest, headers=headers_req, data=payload_req)
    logger.debug("openmano response: %s", mano_response.text)
    if args.verbose is None:
        args.verbose = 0
    if args.name is not None:
        args.verbose += 1
    return _print_verbose(mano_response, args.verbose)
-
-
-# WIM
-def wim_account_create(args):
-    tenant = _get_tenant()
-    wim = _get_wim(args.name)
-    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
-
-    wim_dict = {}
-    if args.account_name is not None:
-        wim_dict['name'] = args.account_name
-    if args.user is not None:
-        wim_dict['user'] = args.user
-    if args.password is not None:
-        wim_dict['password'] = args.password
-    if args.config is not None:
-        wim_dict["config"] = _load_file_or_yaml(args.config)
-
-    payload_req = json.dumps({"wim_account": wim_dict})
-
-    URLrequest = "http://%s:%s/openmano/%s/wims/%s" % (mano_host, mano_port, tenant, wim)
-    logger.debug("openmano request: %s", payload_req)
-    mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
-    logger.debug("openmano response: %s", mano_response.text)
-    result = _print_verbose(mano_response, args.verbose)
-    # provide addional information if error
-    if mano_response.status_code != 200:
-        content = mano_response.json()
-        if "already in use for  'name'" in content['error']['description'] and \
-                "to database wim_tenants table" in content['error']['description']:
-            print "Try to specify a different name with --wim-tenant-name"
-    return result
-
-
def wim_account_delete(args):
    """Delete a WIM account association (from any tenant with --all).

    Returns 0 on success, otherwise the HTTP status code.
    """
    if args.all:
        tenant = "any"
    else:
        tenant = _get_tenant()
    wim = _get_wim(args.name, tenant)
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
    URLrequest = "http://%s:%s/openmano/%s/wims/%s" % (mano_host, mano_port, tenant, wim)
    mano_response = requests.delete(URLrequest, headers=headers_req)
    logger.debug("openmano response: %s", mano_response.text)
    content = mano_response.json()
    result = 0 if mano_response.status_code == 200 else mano_response.status_code
    if mano_response.status_code == 200:
        print(content['result'])
    else:
        print(content['error']['description'])
    return result
-
-
def wim_account_edit(args):
    """Edit an existing WIM account association for the current tenant.

    Returns 0 on success, otherwise the HTTP status code.
    """
    tenant = _get_tenant()
    wim = _get_wim(args.name)
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}

    wim_dict = {}
    # BUGFIX: these guards were inverted ('if not args.x') and the name was read
    # from args.vim_tenant_name, so supplied values were ignored and missing
    # ones were sent as None
    if args.account_name:
        wim_dict['name'] = args.account_name
    if args.user:
        wim_dict['user'] = args.user
    if args.password:
        wim_dict['password'] = args.password
    if args.config:
        wim_dict["config"] = _load_file_or_yaml(args.config)

    payload_req = json.dumps({"wim_account": wim_dict})

    # NOTE(review): edit uses POST like create — confirm the server expects POST (not PUT) here
    URLrequest = "http://%s:%s/openmano/%s/wims/%s" % (mano_host, mano_port, tenant, wim)
    logger.debug("openmano request: %s", payload_req)
    mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
    logger.debug("openmano response: %s", mano_response.text)
    result = _print_verbose(mano_response, args.verbose)
    # provide additional information if error
    if mano_response.status_code != 200:
        content = mano_response.json()
        if "already in use for  'name'" in content['error']['description'] and \
                "to database wim_tenants table" in content['error']['description']:
            print("Try to specify a different name with --wim-tenant-name")
    return result
-
def wim_create(args):
    """Create a new WIM entry at the openmano server."""
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
    wim_dict = {"name": args.name, "wim_url": args.url}
    if args.description is not None:
        wim_dict["description"] = args.description
    if args.type is not None:
        wim_dict["type"] = args.type
    if args.config is not None:
        wim_dict["config"] = _load_file_or_yaml(args.config)

    payload_req = json.dumps({"wim": wim_dict})

    URLrequest = "http://%s:%s/openmano/wims" % (mano_host, mano_port)
    logger.debug("openmano request: %s", payload_req)
    mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
    logger.debug("openmano response: %s", mano_response.text)
    return _print_verbose(mano_response, args.verbose)
-
-
def wim_edit(args):
    """Edit a WIM from a descriptor file; asks for confirmation unless --force.

    Raises OpenmanoCLIError when nothing to modify was supplied.
    """
    tenant = _get_tenant()
    element = _get_item_uuid('wims', args.name, tenant)
    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
    URLrequest = "http://%s:%s/openmano/wims/%s" % (mano_host, mano_port, element)

    has_arguments = False
    if args.file is not None:
        has_arguments = True
        payload = _load_file_or_yaml(args.file)
    else:
        payload = {}

    if not has_arguments:
        raise OpenmanoCLIError("At least one argument must be provided to modify the wim")

    if 'wim' not in payload:
        payload = {'wim': payload}
    payload_req = json.dumps(payload)

    # NOTE(review): 'args.filer' looks like a typo for 'args.file' — kept as-is; confirm
    if not args.force or (args.name is None and args.filer is None):
        r = input(" Edit wim " + args.name + " (y/N)? ")
        if not (len(r) > 0 and r[0].lower() == "y"):
            return 0
    logger.debug("openmano request: %s", payload_req)
    mano_response = requests.put(URLrequest, headers=headers_req, data=payload_req)
    logger.debug("openmano response: %s", mano_response.text)
    if args.verbose is None:
        args.verbose = 0
    if args.name is not None:
        args.verbose += 1
    return _print_verbose(mano_response, args.verbose)
-
-
def wim_delete(args):
    """Delete a WIM, asking for confirmation unless --force.

    Returns 0 on success (or user abort), otherwise the HTTP status code.
    """
    todelete = _get_item_uuid("wims", args.name, "any")
    if not args.force:
        r = input("Delete wim %s (y/N)? " % (args.name))
        if not (len(r) > 0 and r[0].lower() == "y"):
            return 0
    URLrequest = "http://%s:%s/openmano/wims/%s" % (mano_host, mano_port, todelete)
    mano_response = requests.delete(URLrequest)
    logger.debug("openmano response: %s", mano_response.text)
    result = 0 if mano_response.status_code == 200 else mano_response.status_code
    content = mano_response.json()
    if mano_response.status_code == 200:
        print(content['result'])
    else:
        print(content['error']['description'])
    return result
-
-
def wim_list(args):
    """List WIMs of the current tenant (or all tenants with --all).

    A name increases verbosity by one so a single item is shown in detail.
    """
    tenant = 'any' if args.all else _get_tenant()

    if args.name:
        toshow = _get_item_uuid("wims", args.name, tenant)
        URLrequest = "http://%s:%s/openmano/%s/wims/%s" % (mano_host, mano_port, tenant, toshow)
    else:
        URLrequest = "http://%s:%s/openmano/%s/wims" % (mano_host, mano_port, tenant)
    mano_response = requests.get(URLrequest)
    logger.debug("openmano response: %s", mano_response.text)
    if args.verbose is None:
        args.verbose = 0
    if args.name is not None:
        args.verbose += 1
    return _print_verbose(mano_response, args.verbose)
-
-
-def wim_port_mapping_set(args):
-    tenant = _get_tenant()
-    wim = _get_wim(args.name, tenant)
-    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
-
-    if not args.file:
-        raise OpenmanoCLIError(
-            "No yaml/json has been provided specifying the WIM port mapping")
-    wim_port_mapping = _load_file_or_yaml(args.file)
-
-    payload_req = json.dumps({"wim_port_mapping": wim_port_mapping})
-
-    # read
-    URLrequest = "http://%s:%s/openmano/%s/wims/%s/port_mapping" % (mano_host, mano_port, tenant, wim)
-    mano_response = requests.get(URLrequest)
-    logger.debug("openmano response: %s", mano_response.text)
-    port_mapping = mano_response.json()
-
-    if mano_response.status_code != 200:
-        str(mano_response.json())
-        raise OpenmanoCLIError("openmano client error: {}".format(port_mapping['error']['description']))
-    # TODO: check this if statement
-    if len(port_mapping["wim_port_mapping"]) > 0:
-        if not args.force:
-            r = raw_input("WIM %s already contains a port mapping. Overwrite? (y/N)? " % (wim))
-            if not (len(r) > 0 and r[0].lower() == "y"):
-                return 0
-
-        # clear
-        URLrequest = "http://%s:%s/openmano/%s/wims/%s/port_mapping" % (mano_host, mano_port, tenant, wim)
-        mano_response = requests.delete(URLrequest)
-        logger.debug("openmano response: %s", mano_response.text)
-        if mano_response.status_code != 200:
-            return _print_verbose(mano_response, args.verbose)
-
-    # set
-    URLrequest = "http://%s:%s/openmano/%s/wims/%s/port_mapping" % (mano_host, mano_port, tenant, wim)
-    logger.debug("openmano request: %s", payload_req)
-    mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
-    logger.debug("openmano response: %s", mano_response.text)
-    return _print_verbose(mano_response, 4)
-
-
-def wim_port_mapping_list(args):
-    tenant = _get_tenant()
-    wim = _get_wim(args.name, tenant)
-
-    URLrequest = "http://%s:%s/openmano/%s/wims/%s/port_mapping" % (mano_host, mano_port, tenant, wim)
-    mano_response = requests.get(URLrequest)
-    logger.debug("openmano response: %s", mano_response.text)
-
-    return _print_verbose(mano_response, 4)
-
-
-def wim_port_mapping_clear(args):
-    tenant = _get_tenant()
-    wim = _get_wim(args.name, tenant)
-
-    if not args.force:
-        r = raw_input("Clear WIM port mapping for wim %s (y/N)? " % (wim))
-        if not (len(r) > 0 and r[0].lower() == "y"):
-            return 0
-
-    URLrequest = "http://%s:%s/openmano/%s/wims/%s/port_mapping" % (mano_host, mano_port, tenant, wim)
-    mano_response = requests.delete(URLrequest)
-    logger.debug("openmano response: %s", mano_response.text)
-    content = mano_response.json()
-    # print json.dumps(content, indent=4)
-    result = 0 if mano_response.status_code == 200 else mano_response.status_code
-    if mano_response.status_code == 200:
-        print content['result']
-    else:
-        print content['error']['description']
-    return result
-
-
-def version(args):
-    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
-    URLrequest = "http://%s:%s/openmano/version" % (mano_host, mano_port)
-
-    mano_response = requests.get(URLrequest, headers=headers_req)
-    logger.debug("openmano response: %s", mano_response.text)
-    print mano_response.text
-
-
-global mano_host
-global mano_port
-global mano_tenant
-
-if __name__=="__main__":
-
-    mano_tenant = os.getenv('OPENMANO_TENANT', None)
-    mano_host = os.getenv('OPENMANO_HOST',"localhost")
-    mano_port = os.getenv('OPENMANO_PORT',"9090")
-    mano_datacenter = os.getenv('OPENMANO_DATACENTER',None)
-    # WIM env variable for default WIM
-    mano_wim = os.getenv('OPENMANO_WIM', None)
-
-    main_parser = ThrowingArgumentParser(description='User program to interact with OPENMANO-SERVER (openmanod)')
-    main_parser.add_argument('--version', action='version', help="get version of this client",
-                            version='%(prog)s client version ' + __version__ +
-                                    " (Note: use '%(prog)s version' to get server version)")
-
-    subparsers = main_parser.add_subparsers(help='commands')
-
-    parent_parser = argparse.ArgumentParser(add_help=False)
-    parent_parser.add_argument('--verbose', '-v', action='count', help="increase verbosity level. Use several times")
-    parent_parser.add_argument('--debug', '-d', action='store_true', help="show debug information")
-
-    config_parser = subparsers.add_parser('config', parents=[parent_parser], help="prints configuration values")
-    config_parser.add_argument("-n", action="store_true", help="resolves tenant and datacenter names")
-    config_parser.set_defaults(func=config)
-
-    version_parser = subparsers.add_parser('version', parents=[parent_parser], help="get server version")
-    version_parser.set_defaults(func=version)
-
-    vnf_create_parser = subparsers.add_parser('vnf-create', parents=[parent_parser], help="adds a vnf into the catalogue")
-    vnf_create_parser.add_argument("file", action="store", help="location of the JSON file describing the VNF").completer = FilesCompleter
-    vnf_create_parser.add_argument("--name", action="store", help="name of the VNF (if it exists in the VNF descriptor, it is overwritten)")
-    vnf_create_parser.add_argument("--description", action="store", help="description of the VNF (if it exists in the VNF descriptor, it is overwritten)")
-    vnf_create_parser.add_argument("--image-path", action="store",  help="change image path locations (overwritten)")
-    vnf_create_parser.add_argument("--image-name", action="store",  help="change image name (overwritten)")
-    vnf_create_parser.add_argument("--image-checksum", action="store",  help="change image checksum (overwritten)")
-    vnf_create_parser.set_defaults(func=vnf_create)
-
-    vnf_list_parser = subparsers.add_parser('vnf-list', parents=[parent_parser], help="lists information about a vnf")
-    vnf_list_parser.add_argument("name", nargs='?', help="name of the VNF")
-    vnf_list_parser.add_argument("-a", "--all", action="store_true", help="shows all vnfs, not only the owned or public ones")
-    #vnf_list_parser.add_argument('--descriptor', help="prints the VNF descriptor", action="store_true")
-    vnf_list_parser.set_defaults(func=vnf_list)
-
-    vnf_delete_parser = subparsers.add_parser('vnf-delete', parents=[parent_parser], help="deletes a vnf from the catalogue")
-    vnf_delete_parser.add_argument("name", action="store", help="name or uuid of the VNF to be deleted")
-    vnf_delete_parser.add_argument("-f", "--force", action="store_true", help="forces deletion without asking")
-    vnf_delete_parser.add_argument("-a", "--all", action="store_true", help="allow delete not owned or privated one")
-    vnf_delete_parser.set_defaults(func=vnf_delete)
-
-    scenario_create_parser = subparsers.add_parser('scenario-create', parents=[parent_parser], help="adds a scenario into the OPENMANO DB")
-    scenario_create_parser.add_argument("file", action="store", help="location of the YAML file describing the scenario").completer = FilesCompleter
-    scenario_create_parser.add_argument("--name", action="store", help="name of the scenario (if it exists in the YAML scenario, it is overwritten)")
-    scenario_create_parser.add_argument("--description", action="store", help="description of the scenario (if it exists in the YAML scenario, it is overwritten)")
-    scenario_create_parser.set_defaults(func=scenario_create)
-
-    scenario_list_parser = subparsers.add_parser('scenario-list', parents=[parent_parser], help="lists information about a scenario")
-    scenario_list_parser.add_argument("name", nargs='?', help="name of the scenario")
-    #scenario_list_parser.add_argument('--descriptor', help="prints the scenario descriptor", action="store_true")
-    scenario_list_parser.add_argument("-a", "--all", action="store_true", help="shows all scenarios, not only the owned or public ones")
-    scenario_list_parser.set_defaults(func=scenario_list)
-
-    scenario_delete_parser = subparsers.add_parser('scenario-delete', parents=[parent_parser], help="deletes a scenario from the OPENMANO DB")
-    scenario_delete_parser.add_argument("name", action="store", help="name or uuid of the scenario to be deleted")
-    scenario_delete_parser.add_argument("-f", "--force", action="store_true", help="forces deletion without asking")
-    scenario_delete_parser.add_argument("-a", "--all", action="store_true", help="allow delete not owned or privated one")
-    scenario_delete_parser.set_defaults(func=scenario_delete)
-
-    scenario_deploy_parser = subparsers.add_parser('scenario-deploy', parents=[parent_parser], help="deploys a scenario")
-    scenario_deploy_parser.add_argument("scenario", action="store", help="name or uuid of the scenario to be deployed")
-    scenario_deploy_parser.add_argument("name", action="store", help="name of the instance")
-    scenario_deploy_parser.add_argument("--nostart", action="store_true", help="does not start the vms, just reserve resources")
-    scenario_deploy_parser.add_argument("--datacenter", action="store", help="specifies the datacenter. Needed if several datacenters are available")
-    scenario_deploy_parser.add_argument("--description", action="store", help="description of the instance")
-    scenario_deploy_parser.set_defaults(func=scenario_deploy)
-
-    scenario_deploy_parser = subparsers.add_parser('scenario-verify', help="verifies if a scenario can be deployed (deploys it and deletes it)")
-    scenario_deploy_parser.add_argument("scenario", action="store", help="name or uuid of the scenario to be verified")
-    scenario_deploy_parser.add_argument('--debug', '-d', action='store_true', help="show debug information")
-    scenario_deploy_parser.set_defaults(func=scenario_verify)
-
-    instance_scenario_create_parser = subparsers.add_parser('instance-scenario-create', parents=[parent_parser], help="deploys a scenario")
-    instance_scenario_create_parser.add_argument("file", nargs='?', help="descriptor of the instance. Must be a file or yaml/json text")
-    instance_scenario_create_parser.add_argument("--scenario", action="store", help="name or uuid of the scenario to be deployed")
-    instance_scenario_create_parser.add_argument("--name", action="store", help="name of the instance")
-    instance_scenario_create_parser.add_argument("--nostart", action="store_true", help="does not start the vms, just reserve resources")
-    instance_scenario_create_parser.add_argument("--datacenter", action="store", help="specifies the datacenter. Needed if several datacenters are available")
-    instance_scenario_create_parser.add_argument("--netmap-use", action="append", type=str, dest="netmap_use", help="indicates a datacenter network to map a scenario network 'scenario-network=datacenter-network'. Can be used several times")
-    instance_scenario_create_parser.add_argument("--netmap-create", action="append", type=str, dest="netmap_create", help="the scenario network must be created at datacenter 'scenario-network[=datacenter-network-name]' . Can be used several times")
-    instance_scenario_create_parser.add_argument("--keypair", action="append", type=str, dest="keypair", help="public key for ssh access. Format '[user:]key1[,key2...]'. Can be used several times")
-    instance_scenario_create_parser.add_argument("--keypair-auto", action="store_true", dest="keypair_auto", help="Inject the user ssh-keys found at $HOME/.ssh directory")
-    instance_scenario_create_parser.add_argument("--description", action="store", help="description of the instance")
-    instance_scenario_create_parser.set_defaults(func=instance_create)
-
-    instance_scenario_list_parser = subparsers.add_parser('instance-scenario-list', parents=[parent_parser], help="lists information about a scenario instance")
-    instance_scenario_list_parser.add_argument("name", nargs='?', help="name of the scenario instance")
-    instance_scenario_list_parser.add_argument("-a", "--all", action="store_true", help="shows all instance-scenarios, not only the owned")
-    instance_scenario_list_parser.set_defaults(func=instance_scenario_list)
-
-    instance_scenario_delete_parser = subparsers.add_parser('instance-scenario-delete', parents=[parent_parser], help="deletes a scenario instance (and deletes all VM and net instances in VIM)")
-    instance_scenario_delete_parser.add_argument("name", action="store", help="name or uuid of the scenario instance to be deleted")
-    instance_scenario_delete_parser.add_argument("-f", "--force", action="store_true", help="forces deletion without asking")
-    instance_scenario_delete_parser.add_argument("-a", "--all", action="store_true", help="allow delete not owned or privated one")
-    instance_scenario_delete_parser.set_defaults(func=instance_scenario_delete)
-
-    instance_scenario_action_parser = subparsers.add_parser('instance-scenario-action', parents=[parent_parser], help="invoke an action over part or the whole scenario instance")
-    instance_scenario_action_parser.add_argument("name", action="store", help="name or uuid of the scenario instance")
-    instance_scenario_action_parser.add_argument("action", action="store", type=str, \
-            choices=["start","pause","resume","shutoff","shutdown","forceOff","rebuild","reboot", "console", "add_public_key","vdu-scaling"],\
-            help="action to send")
-    instance_scenario_action_parser.add_argument("param", nargs='?', help="addional param of the action. e.g. console: novnc; reboot: type; vdu-scaling: '[{vdu-id: xxx, type: create|delete, count: 1}]'")
-    instance_scenario_action_parser.add_argument("--vnf", action="append", help="VNF to act on (can use several entries)")
-    instance_scenario_action_parser.add_argument("--vm", action="append", help="VM to act on (can use several entries)")
-    instance_scenario_action_parser.set_defaults(func=instance_scenario_action)
-
-    action_parser = subparsers.add_parser('action-list', parents=[parent_parser], help="get action over an instance status")
-    action_parser.add_argument("id", nargs='?', action="store", help="action id")
-    action_parser.add_argument("--instance", action="store", help="fitler by this instance_id")
-    action_parser.add_argument("--all", action="store", help="Not filter by tenant")
-    action_parser.set_defaults(func=get_action)
-
-    #instance_scenario_status_parser = subparsers.add_parser('instance-scenario-status', help="show the status of a scenario instance")
-    #instance_scenario_status_parser.add_argument("name", action="store", help="name or uuid of the scenario instance")
-    #instance_scenario_status_parser.set_defaults(func=instance_scenario_status)
-
-    tenant_create_parser = subparsers.add_parser('tenant-create', parents=[parent_parser], help="creates a new tenant")
-    tenant_create_parser.add_argument("name", action="store", help="name for the tenant")
-    tenant_create_parser.add_argument("--description", action="store", help="description of the tenant")
-    tenant_create_parser.set_defaults(func=tenant_create)
-
-    tenant_delete_parser = subparsers.add_parser('tenant-delete', parents=[parent_parser], help="deletes a tenant from the catalogue")
-    tenant_delete_parser.add_argument("name", action="store", help="name or uuid of the tenant to be deleted")
-    tenant_delete_parser.add_argument("-f", "--force", action="store_true", help="forces deletion without asking")
-    tenant_delete_parser.set_defaults(func=tenant_delete)
-
-    tenant_list_parser = subparsers.add_parser('tenant-list', parents=[parent_parser], help="lists information about a tenant")
-    tenant_list_parser.add_argument("name", nargs='?', help="name or uuid of the tenant")
-    tenant_list_parser.set_defaults(func=tenant_list)
-
-    element_edit_parser = subparsers.add_parser('tenant-edit', parents=[parent_parser], help="edits one tenant")
-    element_edit_parser.add_argument("name", help="name or uuid of the tenant")
-    element_edit_parser.add_argument("file", help="json/yaml text or file with the changes").completer = FilesCompleter
-    element_edit_parser.add_argument("-f","--force", action="store_true", help="do not prompt for confirmation")
-    element_edit_parser.set_defaults(func=element_edit, element='tenants')
-
-    datacenter_create_parser = subparsers.add_parser('datacenter-create', parents=[parent_parser], help="creates a new datacenter")
-    datacenter_create_parser.add_argument("name", action="store", help="name for the datacenter")
-    datacenter_create_parser.add_argument("url", action="store", help="url for the datacenter")
-    datacenter_create_parser.add_argument("--url_admin", action="store", help="url for administration for the datacenter")
-    datacenter_create_parser.add_argument("--type", action="store", help="datacenter type: openstack or openvim (default)")
-    datacenter_create_parser.add_argument("--config", action="store", help="aditional configuration in json/yaml format")
-    datacenter_create_parser.add_argument("--description", action="store", help="description of the datacenter")
-    datacenter_create_parser.add_argument("--sdn-controller", action="store", help="Name or uuid of the SDN controller to be used", dest='sdn_controller')
-    datacenter_create_parser.set_defaults(func=datacenter_create)
-
-    datacenter_delete_parser = subparsers.add_parser('datacenter-delete', parents=[parent_parser], help="deletes a datacenter from the catalogue")
-    datacenter_delete_parser.add_argument("name", action="store", help="name or uuid of the datacenter to be deleted")
-    datacenter_delete_parser.add_argument("-f", "--force", action="store_true", help="forces deletion without asking")
-    datacenter_delete_parser.set_defaults(func=datacenter_delete)
-
-    datacenter_edit_parser = subparsers.add_parser('datacenter-edit', parents=[parent_parser], help="Edit datacenter")
-    datacenter_edit_parser.add_argument("name", help="name or uuid of the datacenter")
-    datacenter_edit_parser.add_argument("--file", help="json/yaml text or file with the changes").completer = FilesCompleter
-    datacenter_edit_parser.add_argument("--sdn-controller", action="store",
-                                          help="Name or uuid of the SDN controller to be used. Specify 'null' to clear entry", dest='sdn_controller')
-    datacenter_edit_parser.add_argument("-f", "--force", action="store_true", help="do not prompt for confirmation")
-    datacenter_edit_parser.set_defaults(func=datacenter_edit)
-
-    datacenter_list_parser = subparsers.add_parser('datacenter-list', parents=[parent_parser], help="lists information about a datacenter")
-    datacenter_list_parser.add_argument("name", nargs='?', help="name or uuid of the datacenter")
-    datacenter_list_parser.add_argument("-a", "--all", action="store_true", help="shows all datacenters, not only datacenters attached to tenant")
-    datacenter_list_parser.set_defaults(func=datacenter_list)
-
-    datacenter_attach_parser = subparsers.add_parser('datacenter-attach', parents=[parent_parser], help="associates a datacenter to the operating tenant")
-    datacenter_attach_parser.add_argument("name", help="name or uuid of the datacenter")
-    datacenter_attach_parser.add_argument('--vim-tenant-id', action='store', help="specify a datacenter tenant to use. A new one is created by default")
-    datacenter_attach_parser.add_argument('--vim-tenant-name', action='store', help="specify a datacenter tenant name.")
-    datacenter_attach_parser.add_argument("--user", action="store", help="user credentials for the datacenter")
-    datacenter_attach_parser.add_argument("--password", action="store", help="password credentials for the datacenter")
-    datacenter_attach_parser.add_argument("--config", action="store", help="aditional configuration in json/yaml format")
-    datacenter_attach_parser.set_defaults(func=datacenter_attach)
-
-    datacenter_edit_vim_tenant_parser = subparsers.add_parser('datacenter-edit-vim-tenant', parents=[parent_parser],
-                                                     help="Edit the association of a datacenter to the operating tenant")
-    datacenter_edit_vim_tenant_parser.add_argument("name", help="name or uuid of the datacenter")
-    datacenter_edit_vim_tenant_parser.add_argument('--vim-tenant-id', action='store',
-                                          help="specify a datacenter tenant to use. A new one is created by default")
-    datacenter_edit_vim_tenant_parser.add_argument('--vim-tenant-name', action='store', help="specify a datacenter tenant name.")
-    datacenter_edit_vim_tenant_parser.add_argument("--user", action="store", help="user credentials for the datacenter")
-    datacenter_edit_vim_tenant_parser.add_argument("--password", action="store", help="password credentials for the datacenter")
-    datacenter_edit_vim_tenant_parser.add_argument("--config", action="store",
-                                          help="aditional configuration in json/yaml format")
-    datacenter_edit_vim_tenant_parser.set_defaults(func=datacenter_edit_vim_tenant)
-
-    datacenter_detach_parser = subparsers.add_parser('datacenter-detach', parents=[parent_parser], help="removes the association between a datacenter and the operating tenant")
-    datacenter_detach_parser.add_argument("name", help="name or uuid of the datacenter")
-    datacenter_detach_parser.add_argument("-a", "--all", action="store_true", help="removes all associations from this datacenter")
-    datacenter_detach_parser.set_defaults(func=datacenter_detach)
-
-    #=======================datacenter_sdn_port_mapping_xxx section=======================
-    #datacenter_sdn_port_mapping_set
-    datacenter_sdn_port_mapping_set_parser = subparsers.add_parser('datacenter-sdn-port-mapping-set',
-                                                                   parents=[parent_parser],
-                                                                   help="Load a file with the mapping of physical ports "
-                                                                        "and the ports of the dataplaneswitch controlled "
-                                                                        "by a datacenter")
-    datacenter_sdn_port_mapping_set_parser.add_argument("name", action="store", help="specifies the datacenter")
-    datacenter_sdn_port_mapping_set_parser.add_argument("file",
-                                                        help="json/yaml text or file with the port mapping").completer = FilesCompleter
-    datacenter_sdn_port_mapping_set_parser.add_argument("-f", "--force", action="store_true",
-                                                          help="forces overwriting without asking")
-    datacenter_sdn_port_mapping_set_parser.set_defaults(func=datacenter_sdn_port_mapping_set)
-
-    #datacenter_sdn_port_mapping_list
-    datacenter_sdn_port_mapping_list_parser = subparsers.add_parser('datacenter-sdn-port-mapping-list',
-                                                                    parents=[parent_parser],
-                                                                    help="Show the SDN port mapping in a datacenter")
-    datacenter_sdn_port_mapping_list_parser.add_argument("name", action="store", help="specifies the datacenter")
-    datacenter_sdn_port_mapping_list_parser.set_defaults(func=datacenter_sdn_port_mapping_list)
-
-    # datacenter_sdn_port_mapping_clear
-    datacenter_sdn_port_mapping_clear_parser = subparsers.add_parser('datacenter-sdn-port-mapping-clear',
-                                                                    parents=[parent_parser],
-                                                                    help="Clean the the SDN port mapping in a datacenter")
-    datacenter_sdn_port_mapping_clear_parser.add_argument("name", action="store",
-                                                         help="specifies the datacenter")
-    datacenter_sdn_port_mapping_clear_parser.add_argument("-f", "--force", action="store_true",
-                                              help="forces clearing without asking")
-    datacenter_sdn_port_mapping_clear_parser.set_defaults(func=datacenter_sdn_port_mapping_clear)
-    # =======================
-
-    # =======================sdn_controller_xxx section=======================
-    # sdn_controller_create
-    sdn_controller_create_parser = subparsers.add_parser('sdn-controller-create', parents=[parent_parser],
-                                                        help="Creates an SDN controller entity within RO")
-    sdn_controller_create_parser.add_argument("name", help="name of the SDN controller")
-    sdn_controller_create_parser.add_argument("--description", action="store", help="description of the SDN controller")
-    sdn_controller_create_parser.add_argument("--ip", action="store", help="IP of the SDN controller")
-    sdn_controller_create_parser.add_argument("--port", action="store", help="Port of the SDN controller")
-    sdn_controller_create_parser.add_argument("--dpid", action="store",
-                                             help="DPID of the dataplane switch controlled by this SDN controller")
-    sdn_controller_create_parser.add_argument("--type", action="store",
-                                             help="Specify the SDN controller type. Valid types are 'opendaylight' and 'floodlight'")
-    sdn_controller_create_parser.add_argument("--user", action="store", help="user credentials for the SDN controller")
-    sdn_controller_create_parser.add_argument("--passwd", action="store", dest='password',
-                                             help="password credentials for the SDN controller")
-    sdn_controller_create_parser.set_defaults(func=sdn_controller_create)
-
-    # sdn_controller_edit
-    sdn_controller_edit_parser = subparsers.add_parser('sdn-controller-edit', parents=[parent_parser],
-                                                        help="Update one or more options of a SDN controller")
-    sdn_controller_edit_parser.add_argument("name", help="name or uuid of the SDN controller", )
-    sdn_controller_edit_parser.add_argument("--name", action="store", help="Update the name of the SDN controller",
-                                              dest='new_name')
-    sdn_controller_edit_parser.add_argument("--description", action="store", help="description of the SDN controller")
-    sdn_controller_edit_parser.add_argument("--ip", action="store", help="IP of the SDN controller")
-    sdn_controller_edit_parser.add_argument("--port", action="store", help="Port of the SDN controller")
-    sdn_controller_edit_parser.add_argument("--dpid", action="store",
-                                             help="DPID of the dataplane switch controlled by this SDN controller")
-    sdn_controller_edit_parser.add_argument("--type", action="store",
-                                             help="Specify the SDN controller type. Valid types are 'opendaylight' and 'floodlight'")
-    sdn_controller_edit_parser.add_argument("--user", action="store", help="user credentials for the SDN controller")
-    sdn_controller_edit_parser.add_argument("--password", action="store",
-                                             help="password credentials for the SDN controller", dest='password')
-    sdn_controller_edit_parser.add_argument("-f", "--force", action="store_true", help="do not prompt for confirmation")
-    #TODO: include option --file
-    sdn_controller_edit_parser.set_defaults(func=sdn_controller_edit)
-
-    #sdn_controller_list
-    sdn_controller_list_parser = subparsers.add_parser('sdn-controller-list',
-                                                                    parents=[parent_parser],
-                                                                    help="List the SDN controllers")
-    sdn_controller_list_parser.add_argument("name", nargs='?', help="name or uuid of the SDN controller")
-    sdn_controller_list_parser.set_defaults(func=sdn_controller_list)
-
-    # sdn_controller_delete
-    sdn_controller_delete_parser = subparsers.add_parser('sdn-controller-delete',
-                                                                    parents=[parent_parser],
-                                                                    help="Delete the the SDN controller")
-    sdn_controller_delete_parser.add_argument("name", help="name or uuid of the SDN controller")
-    sdn_controller_delete_parser.add_argument("-f", "--force", action="store_true", help="forces deletion without asking")
-    sdn_controller_delete_parser.set_defaults(func=sdn_controller_delete)
-    # =======================
-
-    # WIM ======================= WIM section==================
-
-    # WIM create
-    wim_create_parser = subparsers.add_parser('wim-create',
-                                              parents=[parent_parser], help="creates a new wim")
-    wim_create_parser.add_argument("name", action="store",
-                                   help="name for the wim")
-    wim_create_parser.add_argument("url", action="store",
-                                   help="url for the wim")
-    wim_create_parser.add_argument("--type", action="store",
-                                   help="wim type: tapi, onos, dynpac or odl (default)")
-    wim_create_parser.add_argument("--config", action="store",
-                                   help="additional configuration in json/yaml format")
-    wim_create_parser.add_argument("--description", action="store",
-                                   help="description of the wim")
-    wim_create_parser.set_defaults(func=wim_create)
-
-    # WIM delete
-    wim_delete_parser = subparsers.add_parser('wim-delete',
-                                              parents=[parent_parser], help="deletes a wim from the catalogue")
-    wim_delete_parser.add_argument("name", action="store",
-                                   help="name or uuid of the wim to be deleted")
-    wim_delete_parser.add_argument("-f", "--force", action="store_true",
-                                   help="forces deletion without asking")
-    wim_delete_parser.set_defaults(func=wim_delete)
-
-    # WIM edit
-    wim_edit_parser = subparsers.add_parser('wim-edit',
-                                            parents=[parent_parser], help="edits a wim")
-    wim_edit_parser.add_argument("name", help="name or uuid of the wim")
-    wim_edit_parser.add_argument("--file",
-                                 help="json/yaml text or file with the changes")\
-                                .completer = FilesCompleter
-    wim_edit_parser.add_argument("-f", "--force", action="store_true",
-                                 help="do not prompt for confirmation")
-    wim_edit_parser.set_defaults(func=wim_edit)
-
-    # WIM list
-    wim_list_parser = subparsers.add_parser('wim-list',
-                                            parents=[parent_parser],
-                                            help="lists information about registered wims")
-    wim_list_parser.add_argument("name", nargs='?',
-                                 help="name or uuid of the wim")
-    wim_list_parser.add_argument("-a", "--all", action="store_true",
-                                 help="shows all wims, not only wims attached to tenant")
-    wim_list_parser.set_defaults(func=wim_list)
-
-    # WIM account create
-    wim_attach_parser = subparsers.add_parser('wim-account-create', parents=
-    [parent_parser], help="associates a wim account to the operating tenant")
-    wim_attach_parser.add_argument("name", help="name or uuid of the wim")
-    wim_attach_parser.add_argument('--account-name', action='store',
-                                   help="specify a name for the wim account.")
-    wim_attach_parser.add_argument("--user", action="store",
-                                   help="user credentials for the wim account")
-    wim_attach_parser.add_argument("--password", action="store",
-                                   help="password credentials for the wim account")
-    wim_attach_parser.add_argument("--config", action="store",
-                                   help="additional configuration in json/yaml format")
-    wim_attach_parser.set_defaults(func=wim_account_create)
-
-    # WIM account delete
-    wim_detach_parser = subparsers.add_parser('wim-account-delete',
-                                        parents=[parent_parser],
-                                        help="removes the association "
-                                                "between a wim account and the operating tenant")
-    wim_detach_parser.add_argument("name", help="name or uuid of the wim")
-    wim_detach_parser.add_argument("-a", "--all", action="store_true",
-                                   help="removes all associations from this wim")
-    wim_detach_parser.add_argument("-f", "--force", action="store_true",
-                                   help="forces delete without asking")
-    wim_detach_parser.set_defaults(func=wim_account_delete)
-
-    # WIM account edit
-    wim_attach_edit_parser = subparsers.add_parser('wim-account-edit', parents=
-    [parent_parser], help="modifies the association of a wim account to the operating tenant")
-    wim_attach_edit_parser.add_argument("name", help="name or uuid of the wim")
-    wim_attach_edit_parser.add_argument('--account-name', action='store',
-                                   help="specify a name for the wim account.")
-    wim_attach_edit_parser.add_argument("--user", action="store",
-                                   help="user credentials for the wim account")
-    wim_attach_edit_parser.add_argument("--password", action="store",
-                                   help="password credentials for the wim account")
-    wim_attach_edit_parser.add_argument("--config", action="store",
-                                   help="additional configuration in json/yaml format")
-    wim_attach_edit_parser.set_defaults(func=wim_account_edit)
-
-    # WIM port mapping set
-    wim_port_mapping_set_parser = subparsers.add_parser('wim-port-mapping-set',
-                                                        parents=[parent_parser],
-                                                        help="Load a file with the mappings "
-                                                                "of ports of a WAN switch that is "
-                                                                "connected to a PoP and the ports "
-                                                                "of the switch controlled by the PoP")
-    wim_port_mapping_set_parser.add_argument("name", action="store",
-                                             help="specifies the wim")
-    wim_port_mapping_set_parser.add_argument("file",
-                                             help="json/yaml text or file with the wim port mapping")\
-        .completer = FilesCompleter
-    wim_port_mapping_set_parser.add_argument("-f", "--force",
-                                             action="store_true", help="forces overwriting without asking")
-    wim_port_mapping_set_parser.set_defaults(func=wim_port_mapping_set)
-
-    # WIM port mapping list
-    wim_port_mapping_list_parser = subparsers.add_parser('wim-port-mapping-list',
-            parents=[parent_parser], help="Show the port mappings for a wim")
-    wim_port_mapping_list_parser.add_argument("name", action="store",
-                                              help="specifies the wim")
-    wim_port_mapping_list_parser.set_defaults(func=wim_port_mapping_list)
-
-    # WIM port mapping clear
-    wim_port_mapping_clear_parser = subparsers.add_parser('wim-port-mapping-clear',
-            parents=[parent_parser], help="Clean the port mapping in a wim")
-    wim_port_mapping_clear_parser.add_argument("name", action="store",
-                                               help="specifies the wim")
-    wim_port_mapping_clear_parser.add_argument("-f", "--force",
-                                               action="store_true",
-                                               help="forces clearing without asking")
-    wim_port_mapping_clear_parser.set_defaults(func=wim_port_mapping_clear)
-
-    # =======================================================
-
-    action_dict={'net-update': 'retrieves external networks from datacenter',
-                 'net-edit': 'edits an external network',
-                 'net-delete': 'deletes an external network',
-                 'net-list': 'lists external networks from a datacenter'
-                 }
-    for item in action_dict:
-        datacenter_action_parser = subparsers.add_parser('datacenter-'+item, parents=[parent_parser], help=action_dict[item])
-        datacenter_action_parser.add_argument("datacenter", help="name or uuid of the datacenter")
-        if item=='net-edit' or item=='net-delete':
-            datacenter_action_parser.add_argument("net", help="name or uuid of the datacenter net")
-        if item=='net-edit':
-            datacenter_action_parser.add_argument("file", help="json/yaml text or file with the changes").completer = FilesCompleter
-        if item!='net-list':
-            datacenter_action_parser.add_argument("-f","--force", action="store_true", help="do not prompt for confirmation")
-        datacenter_action_parser.set_defaults(func=datacenter_net_action, action=item)
-
-
-    action_dict={'netmap-import': 'create network senario netmap base on the datacenter networks',
-                 'netmap-create': 'create a new network senario netmap',
-                 'netmap-edit':   'edit name of a network senario netmap',
-                 'netmap-delete': 'deletes a network scenario netmap (--all for clearing all)',
-                 'netmap-list':   'list/show network scenario netmaps'
-                 }
-    for item in action_dict:
-        datacenter_action_parser = subparsers.add_parser('datacenter-'+item, parents=[parent_parser], help=action_dict[item])
-        datacenter_action_parser.add_argument("--datacenter", help="name or uuid of the datacenter")
-        #if item=='net-add':
-        #    datacenter_action_parser.add_argument("net", help="name of the network")
-        if item=='netmap-delete':
-            datacenter_action_parser.add_argument("netmap", nargs='?',help="name or uuid of the datacenter netmap to delete")
-            datacenter_action_parser.add_argument("--all", action="store_true", help="delete all netmap of this datacenter")
-            datacenter_action_parser.add_argument("-f","--force", action="store_true", help="do not prompt for confirmation")
-        if item=='netmap-edit':
-            datacenter_action_parser.add_argument("netmap", help="name or uuid of the datacenter netmap do edit")
-            datacenter_action_parser.add_argument("file", nargs='?', help="json/yaml text or file with the changes").completer = FilesCompleter
-            datacenter_action_parser.add_argument("--name", action='store', help="name to assign to the datacenter netmap")
-            datacenter_action_parser.add_argument('--vim-id', action='store', help="specify vim network uuid")
-            datacenter_action_parser.add_argument("-f","--force", action="store_true", help="do not prompt for confirmation")
-        if item=='netmap-list':
-            datacenter_action_parser.add_argument("netmap", nargs='?',help="name or uuid of the datacenter netmap to show")
-        if item=='netmap-create':
-            datacenter_action_parser.add_argument("file", nargs='?', help="json/yaml text or file descriptor with the changes").completer = FilesCompleter
-            datacenter_action_parser.add_argument("--name", action='store', help="name to assign to the datacenter netmap, by default same as vim-name")
-            datacenter_action_parser.add_argument('--vim-id', action='store', help="specify vim network uuid")
-            datacenter_action_parser.add_argument('--vim-name', action='store', help="specify vim network name")
-        if item=='netmap-import':
-            datacenter_action_parser.add_argument("-f","--force", action="store_true", help="do not prompt for confirmation")
-        datacenter_action_parser.set_defaults(func=datacenter_netmap_action, action=item)
-
-    # =======================vim_net_sdn_xxx section=======================
-    # vim_net_sdn_attach
-    vim_net_sdn_attach_parser = subparsers.add_parser('vim-net-sdn-attach',
-                                                      parents=[parent_parser],
-                                                      help="Specify the port to access to an external network using SDN")
-    vim_net_sdn_attach_parser.add_argument("vim_net", action="store",
-                                                help="Name/id of the network in the vim that will be used to connect to the external network")
-    vim_net_sdn_attach_parser.add_argument("port", action="store", help="Specifies the port in the dataplane switch to access to the external network")
-    vim_net_sdn_attach_parser.add_argument("--vlan", action="store", help="Specifies the vlan (if any) to use in the defined port")
-    vim_net_sdn_attach_parser.add_argument("--mac", action="store", help="Specifies the MAC (if known) of the physical device that will be reachable by this external port")
-    vim_net_sdn_attach_parser.add_argument("--datacenter", action="store", help="specifies the datacenter")
-    vim_net_sdn_attach_parser.set_defaults(func=vim_net_sdn_attach)
-
-    # vim_net_sdn_detach
-    vim_net_sdn_detach_parser = subparsers.add_parser('vim-net-sdn-detach',
-                                                           parents=[parent_parser],
-                                                           help="Remove the port information to access to an external network using SDN")
-
-    vim_net_sdn_detach_parser.add_argument("vim_net", action="store", help="Name/id of the vim network")
-    vim_net_sdn_detach_parser.add_argument("--id", action="store",help="Specify the uuid of the external ports from this network to be detached")
-    vim_net_sdn_detach_parser.add_argument("--all", action="store_true", help="Detach all external ports from this network")
-    vim_net_sdn_detach_parser.add_argument("-f", "--force", action="store_true", help="forces clearing without asking")
-    vim_net_sdn_detach_parser.add_argument("--datacenter", action="store", help="specifies the datacenter")
-    vim_net_sdn_detach_parser.set_defaults(func=vim_net_sdn_detach)
-    # =======================
-
-    for item in ("network", "tenant", "image"):
-        if item=="network":
-            command_name = 'vim-net'
-        else:
-            command_name = 'vim-'+item
-        vim_item_list_parser = subparsers.add_parser(command_name + '-list', parents=[parent_parser], help="list the vim " + item + "s")
-        vim_item_list_parser.add_argument("name", nargs='?', help="name or uuid of the " + item + "s")
-        vim_item_list_parser.add_argument("--datacenter", action="store", help="specifies the datacenter")
-        vim_item_list_parser.set_defaults(func=vim_action, item=item, action="list")
-
-        vim_item_del_parser = subparsers.add_parser(command_name + '-delete', parents=[parent_parser], help="list the vim " + item + "s")
-        vim_item_del_parser.add_argument("name", help="name or uuid of the " + item + "s")
-        vim_item_del_parser.add_argument("--datacenter", action="store", help="specifies the datacenter")
-        vim_item_del_parser.set_defaults(func=vim_action, item=item, action="delete")
-
-        if item == "network" or item == "tenant":
-            vim_item_create_parser = subparsers.add_parser(command_name + '-create', parents=[parent_parser], help="create a "+item+" at vim")
-            vim_item_create_parser.add_argument("file", nargs='?', help="descriptor of the %s. Must be a file or yaml/json text" % item).completer = FilesCompleter
-            vim_item_create_parser.add_argument("--name", action="store", help="name of the %s" % item  )
-            vim_item_create_parser.add_argument("--datacenter", action="store", help="specifies the datacenter")
-            if item=="network":
-                vim_item_create_parser.add_argument("--type", action="store", help="type of network, data, ptp, bridge")
-                vim_item_create_parser.add_argument("--shared", action="store_true", help="Private or shared")
-                vim_item_create_parser.add_argument("--bind-net", action="store", help="For openvim datacenter type, net to be bind to, for vlan type, use sufix ':<vlan_tag>'")
-            else:
-                vim_item_create_parser.add_argument("--description", action="store", help="description of the %s" % item)
-            vim_item_create_parser.set_defaults(func=vim_action, item=item, action="create")
-
-    argcomplete.autocomplete(main_parser)
-
-    try:
-        args = main_parser.parse_args()
-        #logging info
-        level = logging.CRITICAL
-        streamformat = "%(asctime)s %(name)s %(levelname)s: %(message)s"
-        if "debug" in args and args.debug:
-            level = logging.DEBUG
-        logging.basicConfig(format=streamformat, level= level)
-        logger = logging.getLogger('mano')
-        logger.setLevel(level)
-        result = args.func(args)
-        if result == None:
-            result = 0
-        #for some reason it fails if call exit inside try instance. Need to call exit at the end !?
-    except (requests.exceptions.ConnectionError):
-        print "Connection error: not possible to contact OPENMANO-SERVER (openmanod)"
-        result = -2
-    except (KeyboardInterrupt):
-        print 'Exiting openmano'
-        result = -3
-    except (SystemExit, ArgumentParserError):
-        result = -4
-    except OpenmanoCLIError as e:
-        print str(e)
-        result = -5
-
-    #print result
-    exit(result)
-
diff --git a/openmanod b/openmanod
deleted file mode 100755 (executable)
index ecd9972..0000000
--- a/openmanod
+++ /dev/null
@@ -1,379 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-"""
-openmano server.
-Main program that implements a reference NFVO (Network Functions Virtualisation Orchestrator).
-It interfaces with an NFV VIM through its API and offers a northbound interface, based on REST (openmano API),
-where NFV services are offered including the creation and deletion of VNF templates, VNF instances,
-network service templates and network service instances.
-
-It loads the configuration file and launches the http_server thread that will listen requests using openmano API.
-"""
-
-import time
-import sys
-import getopt
-import yaml
-from os import environ, path as os_path
-from jsonschema import validate as js_v, exceptions as js_e
-import logging
-import logging.handlers as log_handlers
-import socket
-
-from yaml import MarkedYAMLError
-
-from osm_ro import httpserver, nfvo, nfvo_db
-from osm_ro.openmano_schemas import config_schema
-from osm_ro.db_base import db_base_Exception
-from osm_ro.wim.engine import WimEngine
-from osm_ro.wim.persistence import WimPersistence
-import osm_ro
-
-__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes"
-__date__ = "$26-aug-2014 11:09:29$"
-__version__ = "6.0.2.post2"
-version_date = "Sep 2019"
-database_version = 39      # expected database schema version
-
-global global_config
-global logger
-
-
-class LoadConfigurationException(Exception):
-    pass
-
-
-def load_configuration(configuration_file):
-    default_tokens = {'http_port': 9090,
-                      'http_host': 'localhost',
-                      'http_console_proxy': True,
-                      'http_console_host': None,
-                      'log_level': 'DEBUG',
-                      'log_socket_port': 9022,
-                      'auto_push_VNF_to_VIMs': True,
-                      'db_host': 'localhost',
-                      'db_ovim_host': 'localhost'
-                      }
-    try:
-        # Check config file exists
-        with open(configuration_file, 'r') as f:
-            config_str = f.read()
-        # Parse configuration file
-        config = yaml.load(config_str)
-        # Validate configuration file with the config_schema
-        js_v(config, config_schema)
-
-        # Add default values tokens
-        for k, v in default_tokens.items():
-            if k not in config:
-                config[k] = v
-        return config
-
-    except yaml.YAMLError as e:
-        error_pos = ""
-        if isinstance(e, MarkedYAMLError):
-            mark = e.problem_mark
-            error_pos = " at line:{} column:{}".format(mark.line + 1, mark.column + 1)
-        raise LoadConfigurationException("Bad YAML format at configuration file '{file}'{pos}: {message}".format(
-            file=configuration_file, pos=error_pos, message=e))
-    except js_e.ValidationError as e:
-        error_pos = ""
-        if e.path:
-            error_pos = " at '" + ":".join(map(str, e.path)) + "'"
-        raise LoadConfigurationException("Invalid field at configuration file '{file}'{pos} {message}".format(
-            file=configuration_file, pos=error_pos, message=e))
-    except Exception as e:
-        raise LoadConfigurationException("Cannot load configuration file '{file}' {message}".format(
-            file=configuration_file, message=e))
-
-
-def console_port_iterator():
-    """
-    this iterator deals with the http_console_ports
-    returning the ports one by one
-    """
-    index = 0
-    while index < len(global_config["http_console_ports"]):
-        port = global_config["http_console_ports"][index]
-        if type(port) is int:
-            yield port
-        else:  # this is dictionary with from to keys
-            port2 = port["from"]
-            while port2 <= port["to"]:
-                yield port2
-                port2 += 1
-        index += 1
-
-
-def usage():
-    print("Usage: ", sys.argv[0], "[options]")
-    print("      -v|--version: prints current version")
-    print("      -c|--config [configuration_file]: loads the configuration file (default: openmanod.cfg)")
-    print("      -h|--help: shows this help")
-    print(
-        "      -p|--port [port_number]: changes port number and overrides the port number in the configuration file (default: 9090)")
-    print(
-        "      -P|--adminport [port_number]: changes admin port number and overrides the port number in the configuration file (default: 9095)")
-    print("      --log-socket-host HOST: send logs to this host")
-    print("      --log-socket-port PORT: send logs using this port (default: 9022)")
-    print("      --log-file FILE: send logs to this file")
-    print(
-        "      --create-tenant NAME: Try to creates this tenant name before starting, ignoring any errors as e.g. conflict")
-    return
-
-
-def set_logging_file(log_file):
-    try:
-        file_handler = logging.handlers.RotatingFileHandler(log_file, maxBytes=100e6, backupCount=9, delay=0)
-        file_handler.setFormatter(log_formatter_simple)
-        logger.addHandler(file_handler)
-        # remove initial stream handler
-        logging.root.removeHandler(logging.root.handlers[0])
-        print ("logging on '{}'".format(log_file))
-    except IOError as e:
-        raise LoadConfigurationException(
-            "Cannot open logging file '{}': {}. Check folder exist and permissions".format(log_file, e))
-
-
-if __name__ == "__main__":
-    # env2config contains environ variable names and the correspondence with configuration file openmanod.cfg keys.
-    # If this environ is defined, this value is taken instead of the one at at configuration file
-    env2config = {
-        'RO_DB_HOST': 'db_host',
-        'RO_DB_NAME': 'db_name',
-        'RO_DB_USER': 'db_user',
-        'RO_DB_PASSWORD': 'db_passwd',
-        'RO_DB_OVIM_HOST': 'db_ovim_host',
-        'RO_DB_OVIM_NAME': 'db_ovim_name',
-        'RO_DB_OVIM_USER': 'db_ovim_user',
-        'RO_DB_OVIM_PASSWORD': 'db_ovim_passwd',
-        'RO_LOG_LEVEL': 'log_level',
-        'RO_LOG_FILE': 'log_file',
-    }
-    # Configure logging step 1
-    hostname = socket.gethostname()
-    log_formatter_str = '%(asctime)s.%(msecs)03d00Z[{host}@openmanod] %(filename)s:%(lineno)s severity:%(levelname)s logger:%(name)s log:%(message)s'
-    log_formatter_complete = logging.Formatter(log_formatter_str.format(host=hostname), datefmt='%Y-%m-%dT%H:%M:%S')
-    log_format_simple = "%(asctime)s %(levelname)s  %(name)s %(thread)d %(filename)s:%(lineno)s %(message)s"
-    log_formatter_simple = logging.Formatter(log_format_simple, datefmt='%Y-%m-%dT%H:%M:%S')
-    logging.basicConfig(format=log_format_simple, level=logging.DEBUG)
-    logger = logging.getLogger('openmano')
-    logger.setLevel(logging.DEBUG)
-    socket_handler = None
-    # Read parameters and configuration file
-    httpthread = None
-    try:
-        # load parameters and configuration
-        opts, args = getopt.getopt(sys.argv[1:], "hvc:V:p:P:",
-                                   ["config=", "help", "version", "port=", "vnf-repository=", "adminport=",
-                                    "log-socket-host=", "log-socket-port=", "log-file=", "create-tenant="])
-        port = None
-        port_admin = None
-        config_file = 'osm_ro/openmanod.cfg'
-        vnf_repository = None
-        log_file = None
-        log_socket_host = None
-        log_socket_port = None
-        create_tenant = None
-
-        for o, a in opts:
-            if o in ("-v", "--version"):
-                print ("openmanod version " + __version__ + ' ' + version_date)
-                print ("(c) Copyright Telefonica")
-                sys.exit()
-            elif o in ("-h", "--help"):
-                usage()
-                sys.exit()
-            elif o in ("-V", "--vnf-repository"):
-                vnf_repository = a
-            elif o in ("-c", "--config"):
-                config_file = a
-            elif o in ("-p", "--port"):
-                port = a
-            elif o in ("-P", "--adminport"):
-                port_admin = a
-            elif o == "--log-socket-port":
-                log_socket_port = a
-            elif o == "--log-socket-host":
-                log_socket_host = a
-            elif o == "--log-file":
-                log_file = a
-            elif o == "--create-tenant":
-                create_tenant = a
-            else:
-                assert False, "Unhandled option"
-        if log_file:
-            set_logging_file(log_file)
-        global_config = load_configuration(config_file)
-        global_config["version"] = __version__
-        global_config["version_date"] = version_date
-        # Override parameters obtained by command line on ENV
-        if port:
-            global_config['http_port'] = port
-        if port_admin:
-            global_config['http_admin_port'] = port_admin
-        if log_socket_host:
-            global_config['log_socket_host'] = log_socket_host
-        if log_socket_port:
-            global_config['log_socket_port'] = log_socket_port
-
-        # override with ENV
-        for env_k, env_v in environ.items():
-            try:
-                if not env_k.startswith("RO_") or env_k not in env2config or not env_v:
-                    continue
-                global_config[env2config[env_k]] = env_v
-                if env_k.endswith("PORT"):  # convert to int, skip if not possible
-                    global_config[env2config[env_k]] = int(env_v)
-            except Exception as e:
-                logger.warn("skipping environ '{}={}' because exception '{}'".format(env_k, env_v, e))
-
-        global_config["console_port_iterator"] = console_port_iterator
-        global_config["console_thread"] = {}
-        global_config["console_ports"] = {}
-        if not global_config["http_console_host"]:
-            global_config["http_console_host"] = global_config["http_host"]
-            if global_config["http_host"] == "0.0.0.0":
-                global_config["http_console_host"] = socket.gethostname()
-
-        # Configure logging STEP 2
-        if "log_host" in global_config:
-            socket_handler = log_handlers.SocketHandler(global_config["log_socket_host"],
-                                                        global_config["log_socket_port"])
-            socket_handler.setFormatter(log_formatter_complete)
-            if global_config.get("log_socket_level") \
-                    and global_config["log_socket_level"] != global_config["log_level"]:
-                socket_handler.setLevel(global_config["log_socket_level"])
-            logger.addHandler(socket_handler)
-
-        if log_file:
-            global_config['log_file'] = log_file
-        elif global_config.get('log_file'):
-            set_logging_file(global_config['log_file'])
-
-        logger.setLevel(getattr(logging, global_config['log_level']))
-        logger.critical("Starting openmano server version: '%s %s' command: '%s'",
-                        __version__, version_date, " ".join(sys.argv))
-
-        for log_module in ("nfvo", "http", "vim", "wim", "db", "console", "ovim"):
-            log_level_module = "log_level_" + log_module
-            log_file_module = "log_file_" + log_module
-            logger_module = logging.getLogger('openmano.' + log_module)
-            if log_level_module in global_config:
-                logger_module.setLevel(global_config[log_level_module])
-            if log_file_module in global_config:
-                try:
-                    file_handler = logging.handlers.RotatingFileHandler(global_config[log_file_module],
-                                                                        maxBytes=100e6, backupCount=9, delay=0)
-                    file_handler.setFormatter(log_formatter_simple)
-                    logger_module.addHandler(file_handler)
-                except IOError as e:
-                    raise LoadConfigurationException(
-                        "Cannot open logging file '{}': {}. Check folder exist and permissions".format(
-                            global_config[log_file_module], str(e)))
-            global_config["logger_" + log_module] = logger_module
-
-        # Initialize DB connection
-        mydb = nfvo_db.nfvo_db()
-        mydb.connect(global_config['db_host'], global_config['db_user'], global_config['db_passwd'],
-                     global_config['db_name'])
-        db_path = osm_ro.__path__[0] + "/database_utils"
-        if not os_path.exists(db_path + "/migrate_mano_db.sh"):
-            db_path = osm_ro.__path__[0] + "/../database_utils"
-        try:
-            r = mydb.get_db_version()
-            if r[0] != database_version:
-                logger.critical("DATABASE wrong version '{current}'. Try to upgrade/downgrade to version '{target}'"
-                                " with '{db_path}/migrate_mano_db.sh {target}'".format(current=r[0],
-                                                                                       target=database_version,
-                                                                                       db_path=db_path))
-                exit(-1)
-        except db_base_Exception as e:
-            logger.critical("DATABASE is not valid. If you think it is corrupted, you can init it with"
-                            " '{db_path}/init_mano_db.sh' script".format(db_path=db_path))
-            exit(-1)
-
-        nfvo.global_config = global_config
-        if create_tenant:
-            try:
-                nfvo.new_tenant(mydb, {"name": create_tenant})
-            except Exception as e:
-                if isinstance(e, nfvo.NfvoException) and e.http_code == 409:
-                    pass  # if tenant exist (NfvoException error 409), ignore
-                else:  # otherwise print and error and continue
-                    logger.error("Cannot create tenant '{}': {}".format(create_tenant, e))
-
-        # WIM module
-        wim_persistence = WimPersistence(mydb)
-        wim_engine = WimEngine(wim_persistence)
-        # ---
-        nfvo.start_service(mydb, wim_persistence, wim_engine)
-
-        httpthread = httpserver.httpserver(
-            mydb, False,
-            global_config['http_host'], global_config['http_port'],
-            wim_persistence, wim_engine
-        )
-
-        httpthread.start()
-        if 'http_admin_port' in global_config:
-            httpthreadadmin = httpserver.httpserver(mydb, True, global_config['http_host'],
-                                                    global_config['http_admin_port'])
-            httpthreadadmin.start()
-        time.sleep(1)
-        logger.info('Waiting for http clients')
-        print('Waiting for http clients')
-        print('openmanod ready')
-        print('====================')
-        time.sleep(20)
-        sys.stdout.flush()
-
-        # TODO: Interactive console must be implemented here instead of join or sleep
-
-        # httpthread.join()
-        # if 'http_admin_port' in global_config:
-        #    httpthreadadmin.join()
-        while True:
-            time.sleep(86400)
-
-    except KeyboardInterrupt as e:
-        logger.info(str(e))
-    except SystemExit:
-        pass
-    except getopt.GetoptError as e:
-        logger.critical(str(e))  # will print something like "option -a not recognized"
-        exit(-1)
-    except LoadConfigurationException as e:
-        logger.critical(str(e))
-        exit(-1)
-    except db_base_Exception as e:
-        logger.critical(str(e))
-        exit(-1)
-    except nfvo.NfvoException as e:
-        logger.critical(str(e), exc_info=True)
-        exit(-1)
-    nfvo.stop_service()
-    if httpthread:
-        httpthread.join(1)
diff --git a/osm_ro/__init__.py b/osm_ro/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/osm_ro/console_proxy_thread.py b/osm_ro/console_proxy_thread.py
deleted file mode 100644 (file)
index 032c774..0000000
+++ /dev/null
@@ -1,188 +0,0 @@
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-'''
-Implement like a proxy for TCP/IP in a separated thread.
-It creates two sockets to bypass the TCP/IP packets among the fix console 
-server specified at class construction (console_host, console_port)
-and a client that connect against the (host, port) specified also at construction
-
-                ---------------------           -------------------------------
-                |       OPENMANO     |          |         VIM                  |
-client 1  ----> | ConsoleProxyThread | ------>  |  Console server              |
-client 2  ----> |  (host, port)      | ------>  |(console_host, console_server)|
-   ...           --------------------            ------------------------------
-'''
-__author__="Alfonso Tierno"
-__date__ ="$19-nov-2015 09:07:15$"
-
-import socket
-import select
-import threading
-import logging
-
-
-class ConsoleProxyException(Exception):
-    '''raise when an exception has found''' 
-class ConsoleProxyExceptionPortUsed(ConsoleProxyException):
-    '''raise when the port is used''' 
-
-class ConsoleProxyThread(threading.Thread):
-    buffer_size = 4096
-    check_finish = 1 #frequency to check if requested to end in seconds
-
-    def __init__(self, host, port, console_host, console_port, log_level=None):
-        try:
-            threading.Thread.__init__(self)
-            self.console_host = console_host
-            self.console_port = console_port
-            self.host = host
-            self.port = port
-            self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-            self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
-            self.server.bind((host, port))
-            self.server.listen(200)
-            #TODO timeout in a lock section can be used to autoterminate the thread
-            #when inactivity and timeout<time : set timeout=0 and terminate
-            #from outside, close class when timeout==0; set timeout=time+120 when adding a new console on this thread
-            #set self.timeout = time.time() + 120 at init
-            self.name = "ConsoleProxy " + console_host + ":" + str(console_port)
-            self.input_list = [self.server]
-            self.channel = {}
-            self.terminate = False #put at True from outside to force termination
-            self.logger = logging.getLogger('openmano.console')
-            if log_level:
-                self.logger.setLevel( getattr(logging, log_level) )
-
-        except (socket.error, socket.herror, socket.gaierror, socket.timeout) as e:
-            if e is socket.error and e.errno==98:
-                raise ConsoleProxyExceptionPortUsed("socket.error " + str(e))
-            raise ConsoleProxyException(type(e).__name__ + ": "+  (str(e) if len(e.args)==0 else str(e.args[0])) )
-        
-    def run(self):
-        while 1:
-            try:
-                inputready, _, _ = select.select(self.input_list, [], [], self.check_finish)
-            except select.error as e:
-                self.logger.error("Exception on select %s: %s", type(e).__name__, str(e) )
-                self.on_terminate()
-
-            if self.terminate:
-                self.on_terminate()
-                self.logger.debug("Terminate because commanded")
-                break
-            
-            for sock in inputready:
-                if sock == self.server:
-                    self.on_accept()
-                else:
-                    self.on_recv(sock)
-                    
-    def on_terminate(self):
-        while self.input_list:
-            if self.input_list[0] is self.server:
-                self.server.close()
-                del self.input_list[0]
-            else:
-                self.on_close(self.input_list[0], "Terminating thread")
-
-    def on_accept(self):
-        #accept
-        try:
-            clientsock, clientaddr = self.server.accept()
-        except (socket.error, socket.herror, socket.gaierror, socket.timeout) as e:
-            self.logger.error("Exception on_accept %s: %s", type(e).__name__, str(e) )
-            return False
-        #print self.name, ": Accept new client ", clientaddr
-
-        #connect
-        try:
-            forward = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-            forward.connect((self.console_host, self.console_port))
-            name = "%s:%d => (%s:%d => %s:%d) => %s:%d" %\
-                (clientsock.getpeername() + clientsock.getsockname()  + forward.getsockname() + forward.getpeername() )
-            self.logger.warn("new connection " + name)
-                
-            self.input_list.append(clientsock)
-            self.input_list.append(forward)
-            info = { "name": name,
-                    "clientsock" : clientsock,
-                    "serversock" : forward
-                    }
-            self.channel[clientsock] = info
-            self.channel[forward] = info
-            return True
-        except (socket.error, socket.herror, socket.gaierror, socket.timeout) as e:
-            self.logger.error("Exception on_connect to server %s:%d; %s: %s  Close client side %s",
-                self.console_host, self.console_port, type(e).__name__, str(e), str(clientaddr) )
-            clientsock.close()
-            return False
-
-    def on_close(self, sock, cause):
-        if sock not in self.channel:
-            return  #can happen if there is data ready to received at both sides and the channel has been deleted. QUITE IMPROBABLE but just in case
-        info = self.channel[sock]
-        #debug info
-        sockname = "client" if sock is info["clientsock"] else "server"
-        self.logger.warn("del connection %s %s at %s side", info["name"], str(cause), str(sockname) )
-        #close sockets
-        try:
-            # close the connection with client
-            info["clientsock"].close()  # equivalent to do self.s.close()
-        except (socket.error, socket.herror, socket.gaierror, socket.timeout) as e:
-            self.logger.error("Exception on_close client socket %s: %s", type(e).__name__, str(e) )
-        try:
-            # close the connection with remote server
-            info["serversock"].close()
-        except (socket.error, socket.herror, socket.gaierror, socket.timeout) as e:
-            self.logger.error("Exception on_close server socket %s: %s", type(e).__name__, str(e) )
-        
-        #remove objects from input_list
-        self.input_list.remove(info["clientsock"])
-        self.input_list.remove(info["serversock"])
-        # delete both objects from channel dict
-        del self.channel[info["clientsock"]]
-        del self.channel[info["serversock"]]
-
-    def on_recv(self, sock):
-        if sock not in self.channel:
-            return  #can happen if there is data ready to received at both sides and the channel has been deleted. QUITE IMPROBABLE but just in case
-        info = self.channel[sock]
-        peersock = info["serversock"] if sock is info["clientsock"] else info["clientsock"]
-        try:
-            data = sock.recv(self.buffer_size)
-            if len(data) == 0:
-                self.on_close(sock, "peer closed")
-            else:
-                #print self.data
-                sock = peersock
-                peersock.send(data)
-        except (socket.error, socket.herror, socket.gaierror, socket.timeout) as e:
-            #print self.name, ": Exception %s: %s" % (type(e).__name__, str(e) )
-            self.on_close(sock, "Exception %s: %s" % (type(e).__name__, str(e) ))
-
-        
-
-    #def start_timeout(self):
-    #    self.timeout = time.time() + 120
-        
diff --git a/osm_ro/db_base.py b/osm_ro/db_base.py
deleted file mode 100644 (file)
index e6e1134..0000000
+++ /dev/null
@@ -1,816 +0,0 @@
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-'''
-Base class for openmano database manipulation
-'''
-__author__="Alfonso Tierno"
-__date__ ="$4-Apr-2016 10:05:01$"
-
-import MySQLdb as mdb
-import uuid as myUuid
-import  utils as af
-import json
-#import yaml
-import time
-import logging
-import datetime
-from contextlib import contextmanager
-from functools import wraps, partial
-from threading import Lock
-from jsonschema import validate as js_v, exceptions as js_e
-
-from .http_tools import errors as httperrors
-from .utils import Attempt, get_arg, inject_args
-
-
-RECOVERY_TIME = 3
-
-_ATTEMPT = Attempt()
-
-
-def with_transaction(fn=None, cursor=None):
-    """Decorator that can be used together with instances of the ``db_base``
-    class, to perform database actions wrapped in a commit/rollback fashion
-
-    This decorator basically executes the function inside the context object
-    given by the ``transaction`` method in ``db_base``
-
-    Arguments:
-        cursor: [Optional] cursor class
-    """
-    if fn is None:  # Allows calling the decorator directly or with parameters
-        return partial(with_transaction, cursor=cursor)
-
-    @wraps(fn)
-    def _wrapper(self, *args, **kwargs):
-        cursor_type = None
-        if cursor == 'dict':
-            # MySQLdB define the "cursors" module attribute lazily,
-            # so we have to defer references to mdb.cursors.DictCursor
-            cursor_type = mdb.cursors.DictCursor
-
-        with self.transaction(cursor_type):
-            return fn(self, *args, **kwargs)
-
-    return _wrapper
-
-
-def retry(fn=None, max_attempts=Attempt.MAX, **info):
-    """Decorator that can be used together with instances of the ``db_base``
-    class, to replay a method again after a unexpected error.
-
-    The function being decorated needs to either be a method of ``db_base``
-    subclasses or accept an ``db_base`` instance as the first parameter.
-
-    All the extra keyword arguments will be passed to the ``_format_error``
-    method
-    """
-    if fn is None:  # Allows calling the decorator directly or with parameters
-        return partial(retry, max_attempts=max_attempts, **info)
-
-    @wraps(fn)
-    def _wrapper(*args, **kwargs):
-        self = args[0]
-        info.setdefault('table', get_arg('table', fn, args, kwargs))
-        attempt = Attempt(max_attempts=max_attempts, info=info)
-        while attempt.countdown >= 0:
-            try:
-                return inject_args(fn, attempt=attempt)(*args, **kwargs)
-            except (mdb.Error, AttributeError) as ex:
-                self.logger.debug("Attempt #%d", attempt.number)
-                try:
-                    # The format error will throw exceptions, however it can
-                    # tolerate a certain amount of retries if it judges that
-                    # the error can be solved with retrying
-                    self._format_error(ex, attempt.countdown, **attempt.info)
-                    # Anyway, unexpected/unknown errors can still be retried
-                except db_base_Exception as db_ex:
-                    if (attempt.countdown < 0 or db_ex.http_code !=
-                            httperrors.Internal_Server_Error):
-                        raise
-
-            attempt.count += 1
-
-    return _wrapper
-
-
-def _check_valid_uuid(uuid):
-    id_schema = {"type" : "string", "pattern": "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$"}
-    id_schema2 = {"type" : "string", "pattern": "^[a-fA-F0-9]{32}$"}
-    try:
-        js_v(uuid, id_schema)
-        return True
-    except js_e.ValidationError:
-        try:
-            js_v(uuid, id_schema2)
-            return True
-        except js_e.ValidationError:
-            return False
-    return False
-
-def _convert_datetime2str(var):
-    '''Converts a datetime variable to a string with the format '%Y-%m-%dT%H:%i:%s'
-    It enters recursively in the dict var finding this kind of variables
-    '''
-    if type(var) is dict:
-        for k,v in var.items():
-            if type(v) is datetime.datetime:
-                var[k]= v.strftime('%Y-%m-%dT%H:%M:%S')
-            elif type(v) is dict or type(v) is list or type(v) is tuple:
-                _convert_datetime2str(v)
-        if len(var) == 0: return True
-    elif type(var) is list or type(var) is tuple:
-        for v in var:
-            _convert_datetime2str(v)
-
-def _convert_bandwidth(data, reverse=False, logger=None):
-    '''Check the field bandwidth recursivelly and when found, it removes units and convert to number
-    It assumes that bandwidth is well formed
-    Attributes:
-        'data': dictionary bottle.FormsDict variable to be checked. None or empty is consideted valid
-        'reverse': by default convert form str to int (Mbps), if True it convert from number to units
-    Return:
-        None
-    '''
-    if type(data) is dict:
-        for k in data.keys():
-            if type(data[k]) is dict or type(data[k]) is tuple or type(data[k]) is list:
-                _convert_bandwidth(data[k], reverse, logger)
-        if "bandwidth" in data:
-            try:
-                value=str(data["bandwidth"])
-                if not reverse:
-                    pos = value.find("bps")
-                    if pos>0:
-                        if value[pos-1]=="G": data["bandwidth"] =  int(data["bandwidth"][:pos-1]) * 1000
-                        elif value[pos-1]=="k": data["bandwidth"]= int(data["bandwidth"][:pos-1]) / 1000
-                        else: data["bandwidth"]= int(data["bandwidth"][:pos-1])
-                else:
-                    value = int(data["bandwidth"])
-                    if value % 1000 == 0: data["bandwidth"]=str(value/1000) + " Gbps"
-                    else: data["bandwidth"]=str(value) + " Mbps"
-            except:
-                if logger:
-                    logger.error("convert_bandwidth exception for type '%s' data '%s'", type(data["bandwidth"]), data["bandwidth"])
-                return
-    if type(data) is tuple or type(data) is list:
-        for k in data:
-            if type(k) is dict or type(k) is tuple or type(k) is list:
-                _convert_bandwidth(k, reverse, logger)
-
-def _convert_str2boolean(data, items):
-    '''Check recursively the content of data, and if there is an key contained in items, convert value from string to boolean
-    Done recursively
-    Attributes:
-        'data': dictionary variable to be checked. None or empty is considered valid
-        'items': tuple of keys to convert
-    Return:
-        None
-    '''
-    if type(data) is dict:
-        for k in data.keys():
-            if type(data[k]) is dict or type(data[k]) is tuple or type(data[k]) is list:
-                _convert_str2boolean(data[k], items)
-            if k in items:
-                if type(data[k]) is str:
-                    if   data[k]=="false" or data[k]=="False" or data[k]=="0": data[k]=False
-                    elif data[k]=="true"  or data[k]=="True" or data[k]=="1":  data[k]=True
-                elif type(data[k]) is int:
-                    if   data[k]==0: data[k]=False
-                    elif  data[k]==1:  data[k]=True
-    if type(data) is tuple or type(data) is list:
-        for k in data:
-            if type(k) is dict or type(k) is tuple or type(k) is list:
-                _convert_str2boolean(k, items)
-
-class db_base_Exception(httperrors.HttpMappedError):
-    '''Common Exception for all database exceptions'''
-
-    def __init__(self, message, http_code=httperrors.Bad_Request):
-        super(db_base_Exception, self).__init__(message, http_code)
-
-class db_base():
-    tables_with_created_field=()
-
-    def __init__(self, host=None, user=None, passwd=None, database=None,
-                 log_name='db', log_level=None, lock=None):
-        self.host = host
-        self.user = user
-        self.passwd = passwd
-        self.database = database
-        self.con = None
-        self.log_level=log_level
-        self.logger = logging.getLogger(log_name)
-        if self.log_level:
-            self.logger.setLevel( getattr(logging, log_level) )
-        self.lock = lock or Lock()
-
-    def connect(self, host=None, user=None, passwd=None, database=None):
-        '''Connect to specific data base.
-        The first time a valid host, user, passwd and database must be provided,
-        Following calls can skip this parameters
-        '''
-        try:
-            if host:        self.host = host
-            if user:        self.user = user
-            if passwd:      self.passwd = passwd
-            if database:    self.database = database
-
-            self.con = mdb.connect(self.host, self.user, self.passwd, self.database)
-            self.logger.debug("DB: connected to '%s' at '%s@%s'", self.database, self.user, self.host)
-        except mdb.Error as e:
-            raise db_base_Exception("Cannot connect to DataBase '{}' at '{}@{}' Error {}: {}".format(
-                                    self.database, self.user, self.host, e.args[0], e.args[1]),
-                                    http_code = httperrors.Unauthorized )
-
-    def escape(self, value):
-        return self.con.escape(value)
-
-    def escape_string(self, value):
-        if isinstance(value, unicode):
-            value = value.encode("utf8")
-        return self.con.escape_string(value)
-
-    @retry
-    @with_transaction
-    def get_db_version(self):
-        ''' Obtain the database schema version.
-        Return: (negative, text) if error or version 0.0 where schema_version table is missing
-                (version_int, version_text) if ok
-        '''
-        cmd = "SELECT version_int,version FROM schema_version"
-        self.logger.debug(cmd)
-        self.cur.execute(cmd)
-        rows = self.cur.fetchall()
-        highest_version_int=0
-        highest_version=""
-        for row in rows: #look for the latest version
-            if row[0]>highest_version_int:
-                highest_version_int, highest_version = row[0:2]
-        return highest_version_int, highest_version
-
-    def disconnect(self):
-        '''disconnect from specific data base'''
-        try:
-            self.con.close()
-            self.con = None
-        except mdb.Error as e:
-            self.logger.error("while disconnecting from DB: Error %d: %s",e.args[0], e.args[1])
-            return
-        except AttributeError as e: #self.con not defined
-            if e[0][-5:] == "'con'":
-                self.logger.warn("while disconnecting from DB: Error %d: %s",e.args[0], e.args[1])
-                return
-            else:
-                raise
-
-    def reconnect(self):
-        """Try to gracefully to the database in case of error"""
-        try:
-            self.con.ping(True)  # auto-reconnect if the server is available
-        except:
-            # The server is probably not available...
-            # Let's wait a bit
-            time.sleep(RECOVERY_TIME)
-            self.con = None
-            self.connect()
-
-    def fork_connection(self):
-        """Return a new database object, with a separated connection to the
-        database (and lock), so it can act independently
-        """
-        obj =  self.__class__(
-            host=self.host,
-            user=self.user,
-            passwd=self.passwd,
-            database=self.database,
-            log_name=self.logger.name,
-            log_level=self.log_level,
-            lock=Lock()
-        )
-
-        obj.connect()
-
-        return obj
-
-    @contextmanager
-    def transaction(self, cursor_type=None):
-        """DB changes that are executed inside this context will be
-        automatically rolled back in case of error.
-
-        This implementation also adds a lock, so threads sharing the same
-        connection object are synchronized.
-
-        Arguments:
-            cursor_type: default: MySQLdb.cursors.DictCursor
-
-        Yields:
-            Cursor object
-
-        References:
-            https://www.oreilly.com/library/view/mysql-cookbook-2nd/059652708X/ch15s08.html
-            https://github.com/PyMySQL/mysqlclient-python/commit/c64915b1e5c705f4fb10e86db5dcfed0b58552cc
-        """
-        # Previously MySQLdb had built-in support for that using the context
-        # API for the connection object.
-        # This support was removed in version 1.40
-        # https://github.com/PyMySQL/mysqlclient-python/blob/master/HISTORY.rst#whats-new-in-140
-        with self.lock:
-            try:
-                if self.con.get_autocommit():
-                    self.con.query("BEGIN")
-
-                self.cur = self.con.cursor(cursor_type)
-                yield self.cur
-            except:  # noqa
-                self.con.rollback()
-                raise
-            else:
-                self.con.commit()
-
-
-    def _format_error(self, e, tries=1, command=None,
-                      extra=None, table=None, cmd=None, **_):
-        '''Creates a text error base on the produced exception
-            Params:
-                e: mdb exception
-                retry: in case of timeout, if reconnecting to database and retry, or raise and exception
-                cmd: database command that produce the exception
-                command: if the intention is update or delete
-                extra: extra information to add to some commands
-            Return
-                HTTP error in negative, formatted error text
-        '''  # the **_ ignores extra kwargs
-        table_info = ' (table `{}`)'.format(table) if table else ''
-        if cmd:
-            self.logger.debug("Exception '%s' with command '%s'%s",
-                              e, cmd, table_info)
-
-        if isinstance(e,AttributeError ):
-            self.logger.debug(str(e), exc_info=True)
-            raise db_base_Exception("DB Exception " + str(e), httperrors.Internal_Server_Error)
-        if e.args[0]==2006 or e.args[0]==2013 : #MySQL server has gone away (((or)))    Exception 2013: Lost connection to MySQL server during query
-            # Let's aways reconnect if the connection is lost
-            # so future calls are not affected.
-            self.reconnect()
-
-            if tries > 1:
-                self.logger.warn("DB Exception '%s'. Retry", str(e))
-                return
-            else:
-                raise db_base_Exception("Database connection timeout Try Again", httperrors.Request_Timeout)
-
-        fk=e.args[1].find("foreign key constraint fails")
-        if fk>=0:
-            if command=="update":
-                raise db_base_Exception("tenant_id '{}' not found.".format(extra), httperrors.Not_Found)
-            elif command=="delete":
-                raise db_base_Exception("Resource is not free. There are {} that prevent deleting it.".format(extra), httperrors.Conflict)
-        de = e.args[1].find("Duplicate entry")
-        fk = e.args[1].find("for key")
-        uk = e.args[1].find("Unknown column")
-        wc = e.args[1].find("in 'where clause'")
-        fl = e.args[1].find("in 'field list'")
-        #print de, fk, uk, wc,fl
-        if de>=0:
-            if fk>=0: #error 1062
-                raise db_base_Exception(
-                    "Value {} already in use for {}{}".format(
-                        e.args[1][de+15:fk], e.args[1][fk+7:], table_info),
-                    httperrors.Conflict)
-        if uk>=0:
-            if wc>=0:
-                raise db_base_Exception(
-                    "Field {} can not be used for filtering{}".format(
-                        e.args[1][uk+14:wc], table_info),
-                    httperrors.Bad_Request)
-            if fl>=0:
-                raise db_base_Exception(
-                    "Field {} does not exist{}".format(
-                        e.args[1][uk+14:wc], table_info),
-                    httperrors.Bad_Request)
-        raise db_base_Exception(
-                "Database internal Error{} {}: {}".format(
-                    table_info, e.args[0], e.args[1]),
-                httperrors.Internal_Server_Error)
-
-    def __str2db_format(self, data):
-        """Convert string data to database format.
-        If data is None it returns the 'Null' text,
-        otherwise it returns the text surrounded by quotes ensuring internal quotes are escaped.
-        """
-        if data is None:
-            return 'Null'
-        elif isinstance(data[1], (str, unicode)):
-            return json.dumps(data)
-        else:
-            return json.dumps(str(data))
-
-    def __tuple2db_format_set(self, data):
-        """Compose the needed text for a SQL SET, parameter 'data' is a pair tuple (A,B),
-        and it returns the text 'A="B"', where A is a field of a table and B is the value
-        If B is None it returns the 'A=Null' text, without surrounding Null by quotes
-        If B is not None it returns the text "A='B'" or 'A="B"' where B is surrounded by quotes,
-        and it ensures internal quotes of B are escaped.
-        B can be also a dict with special keys:
-            {"INCREMENT": NUMBER}, then it produce "A=A+NUMBER"
-        """
-        if data[1] is None:
-            return str(data[0]) + "=Null"
-        elif isinstance(data[1], (str, unicode)):
-            return str(data[0]) + '=' + json.dumps(data[1])
-        elif isinstance(data[1], dict):
-            if "INCREMENT" in data[1]:
-                return "{A}={A}{N:+d}".format(A=data[0], N=data[1]["INCREMENT"])
-            raise db_base_Exception("Format error for UPDATE field: {!r}".format(data[0]))
-        else:
-            return str(data[0]) + '=' + json.dumps(str(data[1]))
-
-    def __create_where(self, data, use_or=None):
-        """
-        Compose the needed text for a SQL WHERE, parameter 'data' can be a dict or a list of dict. By default lists are
-        concatenated with OR and dict with AND, unless parameter 'use_or' indicates other thing.
-        If a dict it will generate 'key1="value1" AND key2="value2" AND ...'.
-            If value is None, it will produce 'key is null'
-            If value is a list or tuple, it will produce 'key="value[0]" OR key="value[1]" OR ...'
-            keys can be suffixed by >,<,<>,>=,<=,' LIKE ' so that this is used to compare key and value instead of "="
-        The special keys "OR", "AND" with a dict value is used to create a nested WHERE
-        If a list, each item will be a dictionary that will be concatenated with OR by default
-        :param data: dict or list of dicts
-        :param use_or: Can be None (use default behaviour), True (use OR) or False (use AND)
-        :return: a string with the content to send to mysql
-        """
-        cmd = []
-        if isinstance(data, dict):
-            for k, v in data.items():
-                if k == "OR":
-                    cmd.append("(" + self.__create_where(v, use_or=True) + ")")
-                    continue
-                elif k == "AND":
-                    cmd.append("(" + self.__create_where(v, use_or=False) + ")")
-                    continue
-
-                if k.endswith(">") or k.endswith("<") or k.endswith("=") or k.endswith(" LIKE "):
-                    pass
-                else:
-                    k += "="
-
-                if v is None:
-                    cmd.append(k.replace("=", " is").replace("<>", " is not") + " Null")
-                elif isinstance(v, (tuple, list)):
-                    cmd2 = []
-                    for v2 in v:
-                        if v2 is None:
-                            cmd2.append(k.replace("=", " is").replace("<>", " is not") + " Null")
-                        elif isinstance(v2, (str, unicode)):
-                            cmd2.append(k + json.dumps(v2))
-                        else:
-                            cmd2.append(k + json.dumps(str(v2)))
-                    cmd.append("(" + " OR ".join(cmd2) + ")")
-                elif isinstance(v, (str, unicode)):
-                    cmd.append(k + json.dumps(v))
-                else:
-                    cmd.append(k + json.dumps(str(v)))
-        elif isinstance(data, (tuple, list)):
-            if use_or is None:
-                use_or = True
-            for k in data:
-                cmd.append("(" + self.__create_where(k) + ")")
-        else:
-            raise db_base_Exception("invalid WHERE clause at '{}'".format(data))
-        if use_or:
-            return " OR ".join(cmd)
-        return " AND ".join(cmd)
-
-    def __remove_quotes(self, data):
-        '''remove single quotes ' of any string content of data dictionary'''
-        for k,v in data.items():
-            if type(v) == str:
-                if "'" in v:
-                    data[k] = data[k].replace("'","_")
-
-    def _update_rows(self, table, UPDATE, WHERE, modified_time=0):
-        """ Update one or several rows of a table.
-        :param UPDATE: dictionary with the changes. dict keys are database columns that will be set with the dict values
-        :param table: database table to update
-        :param WHERE: dict or list of dicts to compose the SQL WHERE clause.
-            If a dict it will generate 'key1="value1" AND key2="value2" AND ...'.
-                If value is None, it will produce 'key is null'
-                If value is a list or tuple, it will produce 'key="value[0]" OR key="value[1]" OR ...'
-                keys can be suffixed by >,<,<>,>=,<= so that this is used to compare key and value instead of "="
-                The special keys "OR", "AND" with a dict value is used to create a nested WHERE
-            If a list, each item will be a dictionary that will be concatenated with OR
-        :return: the number of updated rows, raises exception upon error
-        """
-        # gettting uuid
-        values = ",".join(map(self.__tuple2db_format_set, UPDATE.iteritems() ))
-        if modified_time:
-            values += ",modified_at={:f}".format(modified_time)
-        cmd= "UPDATE " + table + " SET " + values + " WHERE " + self.__create_where(WHERE)
-        self.logger.debug(cmd)
-        self.cur.execute(cmd)
-        return self.cur.rowcount
-
-    def _new_uuid(self, root_uuid=None, used_table=None, created_time=0):
-        """
-        Generate a new uuid. It DOES NOT begin or end the transaction, so self.con.cursor must be created
-        :param root_uuid: master uuid of the transaction
-        :param used_table: the table this uuid is intended for
-        :param created_time: time of creation
-        :return: the created uuid
-        """
-
-        uuid = str(myUuid.uuid1())
-        # defining root_uuid if not provided
-        if root_uuid is None:
-            root_uuid = uuid
-        if created_time:
-            created_at = created_time
-        else:
-            created_at = time.time()
-        # inserting new uuid
-        cmd = "INSERT INTO uuids (uuid, root_uuid, used_at, created_at) VALUES ('{:s}','{:s}','{:s}', {:f})".format(
-            uuid, root_uuid, used_table, created_at)
-        self.logger.debug(cmd)
-        self.cur.execute(cmd)
-        return uuid
-
-    def _new_row_internal(self, table, INSERT, add_uuid=False, root_uuid=None, created_time=0, confidential_data=False):
-        ''' Add one row into a table. It DOES NOT begin or end the transaction, so self.con.cursor must be created
-        Attribute
-            INSERT: dictionary with the key:value to insert
-            table: table where to insert
-            add_uuid: if True, it will create an uuid key entry at INSERT if not provided
-            created_time: time to add to the created_at column
-        It checks presence of uuid and add one automatically otherwise
-        Return: uuid
-        '''
-
-        if add_uuid:
-            #create uuid if not provided
-            if 'uuid' not in INSERT:
-                uuid = INSERT['uuid'] = str(myUuid.uuid1()) # create_uuid
-            else:
-                uuid = str(INSERT['uuid'])
-        else:
-            uuid=None
-        if add_uuid:
-            #defining root_uuid if not provided
-            if root_uuid is None:
-                root_uuid = uuid
-            if created_time:
-                created_at = created_time
-            else:
-                created_at=time.time()
-            #inserting new uuid
-            cmd = "INSERT INTO uuids (uuid, root_uuid, used_at, created_at) VALUES ('{:s}','{:s}','{:s}', {:f})".format(uuid, root_uuid, table, created_at)
-            self.logger.debug(cmd)
-            self.cur.execute(cmd)
-        #insertion
-        cmd= "INSERT INTO " + table +" SET " + \
-            ",".join(map(self.__tuple2db_format_set, INSERT.iteritems() ))
-        if created_time:
-            cmd += ",created_at={time:.9f},modified_at={time:.9f}".format(time=created_time)
-        if confidential_data:
-            index = cmd.find("SET")
-            subcmd = cmd[:index] + 'SET...'
-            self.logger.debug(subcmd)
-        else:
-            self.logger.debug(cmd)
-        self.cur.execute(cmd)
-        self.cur.rowcount
-        return uuid
-
-    def _get_rows(self,table,uuid):
-        cmd = "SELECT * FROM {} WHERE uuid='{}'".format(str(table), str(uuid))
-        self.logger.debug(cmd)
-        self.cur.execute(cmd)
-        rows = self.cur.fetchall()
-        return rows
-
-    @retry
-    @with_transaction
-    def new_row(self, table, INSERT, add_uuid=False, created_time=0, confidential_data=False):
-        ''' Add one row into a table.
-        Attribute
-            INSERT: dictionary with the key: value to insert
-            table: table where to insert
-            tenant_id: only useful for logs. If provided, logs will use this tenant_id
-            add_uuid: if True, it will create an uuid key entry at INSERT if not provided
-        It checks presence of uuid and add one automatically otherwise
-        Return: uuid
-        '''
-        if table in self.tables_with_created_field and created_time==0:
-            created_time=time.time()
-        return self._new_row_internal(table, INSERT, add_uuid, None, created_time, confidential_data)
-
-    @retry
-    @with_transaction
-    def update_rows(self, table, UPDATE, WHERE, modified_time=None, attempt=_ATTEMPT):
-        """ Update one or several rows of a table.
-        :param UPDATE: dictionary with the changes. dict keys are database columns that will be set with the dict values
-        :param table: database table to update
-        :param WHERE: dict or list of dicts to compose the SQL WHERE clause.
-            If a dict it will generate 'key1="value1" AND key2="value2" AND ...'.
-                If value is None, it will produce 'key is null'
-                If value is a list or tuple, it will produce 'key="value[0]" OR key="value[1]" OR ...'
-                keys can be suffixed by >,<,<>,>=,<= so that this is used to compare key and value instead of "="
-                The special keys "OR", "AND" with a dict value is used to create a nested WHERE
-            If a list, each item will be a dictionary that will be concatenated with OR
-        :param modified_time: Can contain the time to be set to the table row.
-            None to set automatically, 0 to do not modify it
-        :return: the number of updated rows, raises exception upon error
-        """
-        if table in self.tables_with_created_field and modified_time is None:
-            modified_time = time.time()
-
-        return self._update_rows(table, UPDATE, WHERE, modified_time)
-
-    def _delete_row_by_id_internal(self, table, uuid):
-        cmd = "DELETE FROM {} WHERE uuid = '{}'".format(table, uuid)
-        self.logger.debug(cmd)
-        self.cur.execute(cmd)
-        deleted = self.cur.rowcount
-        # delete uuid
-        self.cur = self.con.cursor()
-        cmd = "DELETE FROM uuids WHERE root_uuid = '{}'".format(uuid)
-        self.logger.debug(cmd)
-        self.cur.execute(cmd)
-        return deleted
-
-    @retry(command='delete', extra='dependencies')
-    @with_transaction
-    def delete_row_by_id(self, table, uuid):
-        return self._delete_row_by_id_internal(table, uuid)
-
-    @retry
-    def delete_row(self, attempt=_ATTEMPT, **sql_dict):
-        """ Deletes rows from a table.
-        :param UPDATE: dictionary with the changes. dict keys are database columns that will be set with the dict values
-        :param FROM: string with table name (Mandatory)
-        :param WHERE: dict or list of dicts to compose the SQL WHERE clause. (Optional)
-            If a dict it will generate 'key1="value1" AND key2="value2" AND ...'.
-                If value is None, it will produce 'key is null'
-                If value is a list or tuple, it will produce 'key="value[0]" OR key="value[1]" OR ...'
-                keys can be suffixed by >,<,<>,>=,<= so that this is used to compare key and value instead of "="
-                The special keys "OR", "AND" with a dict value is used to create a nested WHERE
-            If a list, each item will be a dictionary that will be concatenated with OR
-        :return: the number of deleted rows, raises exception upon error
-        """
-        # print sql_dict
-        cmd = "DELETE FROM " + str(sql_dict['FROM'])
-        if sql_dict.get('WHERE'):
-            cmd += " WHERE " + self.__create_where(sql_dict['WHERE'])
-        if sql_dict.get('LIMIT'):
-            cmd += " LIMIT " + str(sql_dict['LIMIT'])
-
-        attempt.info['cmd'] = cmd
-
-        with self.transaction():
-            self.logger.debug(cmd)
-            self.cur.execute(cmd)
-            deleted = self.cur.rowcount
-        return deleted
-
-    @retry
-    @with_transaction(cursor='dict')
-    def get_rows_by_id(self, table, uuid, attempt=_ATTEMPT):
-        '''get row from a table based on uuid'''
-        cmd="SELECT * FROM {} where uuid='{}'".format(str(table), str(uuid))
-        attempt.info['cmd'] = cmd
-        self.logger.debug(cmd)
-        self.cur.execute(cmd)
-        rows = self.cur.fetchall()
-        return rows
-
-    @retry
-    def get_rows(self, attempt=_ATTEMPT, **sql_dict):
-        """ Obtain rows from a table.
-        :param SELECT: list or tuple of fields to retrieve) (by default all)
-        :param FROM: string with table name (Mandatory)
-        :param WHERE: dict or list of dicts to compose the SQL WHERE clause. (Optional)
-            If a dict it will generate 'key1="value1" AND key2="value2" AND ...'.
-                If value is None, it will produce 'key is null'
-                If value is a list or tuple, it will produce 'key="value[0]" OR key="value[1]" OR ...'
-                keys can be suffixed by >,<,<>,>=,<= so that this is used to compare key and value instead of "="
-                The special keys "OR", "AND" with a dict value is used to create a nested WHERE
-            If a list, each item will be a dictionary that will be concatenated with OR
-        :param LIMIT: limit the number of obtained entries (Optional)
-        :param ORDER_BY:  list or tuple of fields to order, add ' DESC' to each item if inverse order is required
-        :return: a list with dictionaries at each row, raises exception upon error
-        """
-        # print sql_dict
-        cmd = "SELECT "
-        if 'SELECT' in sql_dict:
-            if isinstance(sql_dict['SELECT'], (tuple, list)):
-                cmd += ",".join(map(str, sql_dict['SELECT']))
-            else:
-                cmd += sql_dict['SELECT']
-        else:
-            cmd += "*"
-
-        cmd += " FROM " + str(sql_dict['FROM'])
-        if sql_dict.get('WHERE'):
-            cmd += " WHERE " + self.__create_where(sql_dict['WHERE'])
-
-        if 'ORDER_BY' in sql_dict:
-            cmd += " ORDER BY "
-            if isinstance(sql_dict['ORDER_BY'], (tuple, list)):
-                cmd += ",".join(map(str, sql_dict['ORDER_BY']))
-            else:
-                cmd += str(sql_dict['ORDER_BY'])
-
-        if 'LIMIT' in sql_dict:
-            cmd += " LIMIT " + str(sql_dict['LIMIT'])
-
-        attempt.info['cmd'] = cmd
-
-        with self.transaction(mdb.cursors.DictCursor):
-            self.logger.debug(cmd)
-            self.cur.execute(cmd)
-            rows = self.cur.fetchall()
-            return rows
-
-    @retry
-    def get_table_by_uuid_name(self, table, uuid_name, error_item_text=None, allow_several=False, WHERE_OR={}, WHERE_AND_OR="OR", attempt=_ATTEMPT):
-        ''' Obtain One row from a table based on name or uuid.
-        Attribute:
-            table: string of table name
-            uuid_name: name or uuid. If not uuid format is found, it is considered a name
-            allow_several: if False return ERROR if more than one row are found
-            error_item_text: in case of error it identifies the 'item' name for a proper output text
-            'WHERE_OR': dict of key:values, translated to key=value OR ... (Optional)
-            'WHERE_AND_OR: str 'AND' or 'OR'(by default) mark the priority to 'WHERE AND (WHERE_OR)' or (WHERE) OR WHERE_OR' (Optional
-        Return: if allow_several==False, a dictionary with this row, or error if no item is found or more than one is found
-                if allow_several==True, a list of dictionaries with the row or rows, error if no item is found
-        '''
-
-        if error_item_text==None:
-            error_item_text = table
-        what = 'uuid' if af.check_valid_uuid(uuid_name) else 'name'
-        cmd = " SELECT * FROM {} WHERE {}='{}'".format(table, what, uuid_name)
-        if WHERE_OR:
-            where_or = self.__create_where(WHERE_OR, use_or=True)
-            if WHERE_AND_OR == "AND":
-                cmd += " AND (" + where_or + ")"
-            else:
-                cmd += " OR " + where_or
-
-        attempt.info['cmd'] = cmd
-
-        with self.transaction(mdb.cursors.DictCursor):
-            self.logger.debug(cmd)
-            self.cur.execute(cmd)
-            number = self.cur.rowcount
-            if number == 0:
-                raise db_base_Exception("No {} found with {} '{}'".format(error_item_text, what, uuid_name), http_code=httperrors.Not_Found)
-            elif number > 1 and not allow_several:
-                raise db_base_Exception("More than one {} found with {} '{}'".format(error_item_text, what, uuid_name), http_code=httperrors.Conflict)
-            if allow_several:
-                rows = self.cur.fetchall()
-            else:
-                rows = self.cur.fetchone()
-            return rows
-
-    @retry(table='uuids')
-    @with_transaction(cursor='dict')
-    def get_uuid(self, uuid):
-        '''check in the database if this uuid is already present'''
-        self.cur.execute("SELECT * FROM uuids where uuid='" + str(uuid) + "'")
-        rows = self.cur.fetchall()
-        return self.cur.rowcount, rows
-
-    @retry
-    @with_transaction(cursor='dict')
-    def get_uuid_from_name(self, table, name):
-        '''Searchs in table the name and returns the uuid
-        '''
-        where_text = "name='" + name +"'"
-        self.cur.execute("SELECT * FROM " + table + " WHERE "+ where_text)
-        rows = self.cur.fetchall()
-        if self.cur.rowcount==0:
-            return 0, "Name %s not found in table %s" %(name, table)
-        elif self.cur.rowcount>1:
-            return self.cur.rowcount, "More than one VNF with name %s found in table %s" %(name, table)
-        return self.cur.rowcount, rows[0]["uuid"]
diff --git a/osm_ro/http_tools/__init__.py b/osm_ro/http_tools/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/osm_ro/http_tools/errors.py b/osm_ro/http_tools/errors.py
deleted file mode 100644 (file)
index 552e85b..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-# -*- coding: utf-8 -*-
-import logging
-from functools import wraps
-
-import bottle
-import yaml
-
-Bad_Request = 400
-Unauthorized = 401
-Not_Found = 404
-Forbidden = 403
-Method_Not_Allowed = 405
-Not_Acceptable = 406
-Request_Timeout = 408
-Conflict = 409
-Service_Unavailable = 503
-Internal_Server_Error = 500
-
-
-class HttpMappedError(Exception):
-    """Base class for a new hierarchy that translate HTTP error codes
-    to python exceptions
-
-    This class accepts an extra argument ``http_code`` (integer
-    representing HTTP error codes).
-    """
-
-    def __init__(self, message, http_code=Internal_Server_Error):
-        Exception.__init__(self, message)
-        self.http_code = http_code
-
-
-class ErrorHandler(object):
-    """Defines a default strategy for handling HttpMappedError.
-
-    This class implements a wrapper (can also be used as decorator), that
-    watches out for different exceptions and log them accordingly.
-
-    Arguments:
-        logger(logging.Logger): logger object to be used to report errors
-    """
-    def __init__(self, logger=None):
-        self.logger = logger or logging.getLogger('openmano.http')
-
-    def __call__(self, function):
-        @wraps(function)
-        def _wraped(*args, **kwargs):
-            try:
-                return function(*args, **kwargs)
-            except bottle.HTTPError:
-                raise
-            except HttpMappedError as ex:
-                self.logger.error(
-                    "%s error %s",
-                    function.__name__, ex.http_code, exc_info=True)
-                bottle.abort(ex.http_code, str(ex))
-            except yaml.YAMLError as ex:
-                self.logger.error(
-                    "YAML error while trying to serialize/unserialize fields",
-                    exc_info=True)
-                bottle.abort(Bad_Request, type(ex).__name__ + ": " + str(ex))
-            except Exception as ex:
-                self.logger.error("Unexpected exception: ", exc_info=True)
-                bottle.abort(Internal_Server_Error,
-                             type(ex).__name__ + ": " + str(ex))
-
-        return _wraped
diff --git a/osm_ro/http_tools/handler.py b/osm_ro/http_tools/handler.py
deleted file mode 100644 (file)
index 49249a8..0000000
+++ /dev/null
@@ -1,114 +0,0 @@
-# -*- coding: utf-8 -*-
-##
-# Copyright 2018 University of Bristol - High Performance Networks Research
-# Group
-# All Rights Reserved.
-#
-# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
-# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: <highperformance-networks@bristol.ac.uk>
-#
-# Neither the name of the University of Bristol nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# This work has been performed in the context of DCMS UK 5G Testbeds
-# & Trials Programme and in the framework of the Metro-Haul project -
-# funded by the European Commission under Grant number 761727 through the
-# Horizon 2020 and 5G-PPP programmes.
-##
-
-from types import MethodType
-
-from bottle import Bottle
-
-
-class route(object):
-    """Decorator that stores route information, so creating the routes can be
-    postponed.
-
-    This allows methods (OOP) with bottle.
-
-    Arguments:
-        method: HTTP verb (e.g. ``'get'``, ``'post'``, ``'put'``, ...)
-        path: URL path that will be handled by the callback
-    """
-    def __init__(self, method, path, **kwargs):
-        kwargs['method'] = method.upper()
-        self.route_info = (path, kwargs)
-
-    def __call__(self, function):
-        function.route_info = self.route_info
-        return function
-
-
-class BaseHandler(object):
-    """Base class that allows isolated webapp implementation using Bottle,
-    when used in conjunction with the ``route`` decorator.
-
-    In this context, a ``Handler`` is meant to be a collection of Bottle
-    routes/callbacks related to a specific topic.
-
-    A ``Handler`` instance can produce a WSGI app that can be mounted or merged
-    inside another more general bottle app.
-
-    Example:
-
-        from http_tools.handler import Handler, route
-        from http_tools.errors import ErrorHandler
-
-        class MyHandler(Handler):
-            plugins = [ErrorHandler()]
-            url_base = '/my/url/base'
-
-            @route('GET', '/some/path/<var>')
-            def get_var(self, var):
-                return var
-
-        app = MyHandler.wsgi_app
-        # ^  Webapp with a `GET /my/url/base/some/path/<var>` route
-    """
-    _wsgi_app = None
-
-    url_base = ''
-    """String representing a path fragment to be prepended to the routes"""
-
-    plugins = []
-    """Bottle plugins to be installed when creating the WSGI app"""
-
-    @property
-    def wsgi_app(self):
-        """Create a WSGI app based on the implemented callbacks"""
-
-        if self._wsgi_app:
-            # Return if cached
-            return self._wsgi_app
-
-        app = Bottle()
-
-        members = (getattr(self, m) for m in dir(self) if m != 'wsgi_app')
-        callbacks = (m for m in members
-                     if isinstance(m, MethodType) and hasattr(m, 'route_info'))
-
-        for callback in callbacks:
-            path, kwargs = callback.route_info
-            kwargs.update(callback=callback, apply=self.plugins)
-            app.route(self.url_base + path, **kwargs)
-
-        self._wsgi_app = app
-
-        return app
diff --git a/osm_ro/http_tools/request_processing.py b/osm_ro/http_tools/request_processing.py
deleted file mode 100644 (file)
index f2dabc8..0000000
+++ /dev/null
@@ -1,209 +0,0 @@
-# -*- coding: utf-8 -*-
-
-#
-# Util functions previously in `httpserver`
-#
-
-__author__ = "Alfonso Tierno, Gerardo Garcia"
-
-import json
-import logging
-
-import bottle
-import yaml
-from jsonschema import exceptions as js_e
-from jsonschema import validate as js_v
-
-from . import errors as httperrors
-
-logger = logging.getLogger('openmano.http')
-
-
-def remove_clear_passwd(data):
-    """
-    Removes clear passwords from the data received
-    :param data: data with clear password
-    :return: data without the password information
-    """
-
-    passw = ['password: ', 'passwd: ']
-
-    for pattern in passw:
-        init = data.find(pattern)
-        while init != -1:
-            end = data.find('\n', init)
-            data = data[:init] + '{}******'.format(pattern) + data[end:]
-            init += 1
-            init = data.find(pattern, init)
-    return data
-
-
-def change_keys_http2db(data, http_db, reverse=False):
-    '''Change keys of dictionary data acording to the key_dict values
-    This allow change from http interface names to database names.
-    When reverse is True, the change is otherwise
-    Attributes:
-        data: can be a dictionary or a list
-        http_db: is a dictionary with hhtp names as keys and database names as value
-        reverse: by default change is done from http api to database.
-            If True change is done otherwise.
-    Return: None, but data is modified'''
-    if type(data) is tuple or type(data) is list:
-        for d in data:
-            change_keys_http2db(d, http_db, reverse)
-    elif type(data) is dict or type(data) is bottle.FormsDict:
-        if reverse:
-            for k,v in http_db.items():
-                if v in data: data[k]=data.pop(v)
-        else:
-            for k,v in http_db.items():
-                if k in data: data[v]=data.pop(k)
-
-
-def format_out(data):
-    '''Return string of dictionary data according to requested json, yaml, xml.
-    By default json
-    '''
-    logger.debug("OUT: " + yaml.safe_dump(data, explicit_start=True, indent=4, default_flow_style=False, tags=False, encoding='utf-8', allow_unicode=True) )
-    accept = bottle.request.headers.get('Accept')
-    if accept and 'application/yaml' in accept:
-        bottle.response.content_type='application/yaml'
-        return yaml.safe_dump(
-                data, explicit_start=True, indent=4, default_flow_style=False,
-                tags=False, encoding='utf-8', allow_unicode=True) #, canonical=True, default_style='"'
-    else: #by default json
-        bottle.response.content_type='application/json'
-        #return data #json no style
-        return json.dumps(data, indent=4) + "\n"
-
-
-def format_in(default_schema, version_fields=None, version_dict_schema=None, confidential_data=False):
-    """
-    Parse the content of HTTP request against a json_schema
-
-    :param default_schema: The schema to be parsed by default
-        if no version field is found in the client data.
-        In None no validation is done
-    :param version_fields: If provided it contains a tuple or list with the
-        fields to iterate across the client data to obtain the version
-    :param version_dict_schema: It contains a dictionary with the version as key,
-        and json schema to apply as value.
-        It can contain a None as key, and this is apply
-        if the client data version does not match any key
-    :return:  user_data, used_schema: if the data is successfully decoded and
-        matches the schema.
-
-    Launch a bottle abort if fails
-    """
-    #print "HEADERS :" + str(bottle.request.headers.items())
-    try:
-        error_text = "Invalid header format "
-        format_type = bottle.request.headers.get('Content-Type', 'application/json')
-        if 'application/json' in format_type:
-            error_text = "Invalid json format "
-            #Use the json decoder instead of bottle decoder because it informs about the location of error formats with a ValueError exception
-            client_data = json.load(bottle.request.body)
-            #client_data = bottle.request.json()
-        elif 'application/yaml' in format_type:
-            error_text = "Invalid yaml format "
-            client_data = yaml.load(bottle.request.body)
-        elif 'application/xml' in format_type:
-            bottle.abort(501, "Content-Type: application/xml not supported yet.")
-        else:
-            logger.warning('Content-Type ' + str(format_type) + ' not supported.')
-            bottle.abort(httperrors.Not_Acceptable, 'Content-Type ' + str(format_type) + ' not supported.')
-            return
-        # if client_data == None:
-        #    bottle.abort(httperrors.Bad_Request, "Content error, empty")
-        #    return
-        if confidential_data:
-            logger.debug('IN: %s', remove_clear_passwd (yaml.safe_dump(client_data, explicit_start=True, indent=4, default_flow_style=False,
-                                              tags=False, encoding='utf-8', allow_unicode=True)))
-        else:
-            logger.debug('IN: %s', yaml.safe_dump(client_data, explicit_start=True, indent=4, default_flow_style=False,
-                                              tags=False, encoding='utf-8', allow_unicode=True) )
-        # look for the client provider version
-        error_text = "Invalid content "
-        if not default_schema and not version_fields:
-            return client_data, None
-        client_version = None
-        used_schema = None
-        if version_fields != None:
-            client_version = client_data
-            for field in version_fields:
-                if field in client_version:
-                    client_version = client_version[field]
-                else:
-                    client_version=None
-                    break
-        if client_version == None:
-            used_schema = default_schema
-        elif version_dict_schema != None:
-            if client_version in version_dict_schema:
-                used_schema = version_dict_schema[client_version]
-            elif None in version_dict_schema:
-                used_schema = version_dict_schema[None]
-        if used_schema==None:
-            bottle.abort(httperrors.Bad_Request, "Invalid schema version or missing version field")
-
-        js_v(client_data, used_schema)
-        return client_data, used_schema
-    except (TypeError, ValueError, yaml.YAMLError) as exc:
-        error_text += str(exc)
-        logger.error(error_text)
-        bottle.abort(httperrors.Bad_Request, error_text)
-    except js_e.ValidationError as exc:
-        logger.error(
-            "validate_in error, jsonschema exception")
-        error_pos = ""
-        if len(exc.path)>0: error_pos=" at " + ":".join(map(json.dumps, exc.path))
-        bottle.abort(httperrors.Bad_Request, error_text + exc.message + error_pos)
-    #except:
-    #    bottle.abort(httperrors.Bad_Request, "Content error: Failed to parse Content-Type",  error_pos)
-    #    raise
-
-def filter_query_string(qs, http2db, allowed):
-    '''Process query string (qs) checking that contains only valid tokens for avoiding SQL injection
-    Attributes:
-        'qs': bottle.FormsDict variable to be processed. None or empty is considered valid
-        'http2db': dictionary with change from http API naming (dictionary key) to database naming(dictionary value)
-        'allowed': list of allowed string tokens (API http naming). All the keys of 'qs' must be one of 'allowed'
-    Return: A tuple with the (select,where,limit) to be use in a database query. All of then transformed to the database naming
-        select: list of items to retrieve, filtered by query string 'field=token'. If no 'field' is present, allowed list is returned
-        where: dictionary with key, value, taken from the query string token=value. Empty if nothing is provided
-        limit: limit dictated by user with the query string 'limit'. 100 by default
-    abort if not permited, using bottel.abort
-    '''
-    where={}
-    limit=100
-    select=[]
-    #if type(qs) is not bottle.FormsDict:
-    #    bottle.abort(httperrors.Internal_Server_Error, '!!!!!!!!!!!!!!invalid query string not a dictionary')
-    #    #bottle.abort(httperrors.Internal_Server_Error, "call programmer")
-    for k in qs:
-        if k=='field':
-            select += qs.getall(k)
-            for v in select:
-                if v not in allowed:
-                    bottle.abort(httperrors.Bad_Request, "Invalid query string at 'field="+v+"'")
-        elif k=='limit':
-            try:
-                limit=int(qs[k])
-            except:
-                bottle.abort(httperrors.Bad_Request, "Invalid query string at 'limit="+qs[k]+"'")
-        else:
-            if k not in allowed:
-                bottle.abort(httperrors.Bad_Request, "Invalid query string at '"+k+"="+qs[k]+"'")
-            if qs[k]!="null":  where[k]=qs[k]
-            else: where[k]=None
-    if len(select)==0: select += allowed
-    #change from http api to database naming
-    for i in range(0,len(select)):
-        k=select[i]
-        if http2db and k in http2db:
-            select[i] = http2db[k]
-    if http2db:
-        change_keys_http2db(where, http2db)
-    #print "filter_query_string", select,where,limit
-
-    return select,where,limit
diff --git a/osm_ro/http_tools/tests/__init__.py b/osm_ro/http_tools/tests/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/osm_ro/http_tools/tests/test_errors.py b/osm_ro/http_tools/tests/test_errors.py
deleted file mode 100644 (file)
index a968e76..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-# -*- coding: utf-8 -*-
-import unittest
-
-import bottle
-
-from .. import errors as httperrors
-from ...tests.helpers import TestCaseWithLogging
-
-
-class TestHttpErrors(TestCaseWithLogging):
-    def test_http_error_base(self):
-        # When an error code is passed as argument
-        ex = httperrors.HttpMappedError(http_code=1226324)
-        # then it should be set in the exception object
-        self.assertEqual(ex.http_code, 1226324)
-        # When an error code is not passed as argument
-        ex = httperrors.HttpMappedError()
-        # then the default error code (internal server error) should be used
-        self.assertEqual(ex.http_code, httperrors.Internal_Server_Error)
-
-    def test_error_handler_should_log_unexpected_errors(self):
-        # Given a error handler wraps a function
-        error_handler = httperrors.ErrorHandler(self.logger)
-
-        # and the function raises an unexpected error
-        @error_handler
-        def _throw():
-            raise AttributeError('some error')
-
-        # when the function is called
-        with self.assertRaises(bottle.HTTPError):
-            _throw()
-        logs = self.caplog.getvalue()
-        # then the exception should be contained by bottle
-        # and a proper message should be logged
-        assert "Unexpected exception:" in logs
-
-    def test_error_handler_should_log_http_based_errors(self):
-        # Given a error handler wraps a function
-        error_handler = httperrors.ErrorHandler(self.logger)
-
-        # and the function raises an error that is considered by the
-        # application
-        @error_handler
-        def _throw():
-            raise httperrors.HttpMappedError(http_code=404)
-
-        # when the function is called
-        with self.assertRaises(bottle.HTTPError):
-            _throw()
-        logs = self.caplog.getvalue()
-        # then the exception should be contained by bottle
-        # and a proper message should be logged
-        assert "_throw error 404" in logs
-
-    def test_error_handler_should_ignore_bottle_errors(self):
-        # Given a error handler wraps a function
-        error_handler = httperrors.ErrorHandler(self.logger)
-
-        # and the function raises an error that is considered by the
-        # application
-        exception = bottle.HTTPError()
-
-        @error_handler
-        def _throw():
-            raise exception
-
-        # when the function is called
-        with self.assertRaises(bottle.HTTPError) as context:
-            _throw()
-        # then the exception should bypass the error handler
-        self.assertEqual(context.exception, exception)
-
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/osm_ro/http_tools/tests/test_handler.py b/osm_ro/http_tools/tests/test_handler.py
deleted file mode 100644 (file)
index af32545..0000000
+++ /dev/null
@@ -1,95 +0,0 @@
-# -*- coding: utf-8 -*-
-import unittest
-
-from mock import MagicMock, patch
-from webtest import TestApp
-
-from .. import handler
-from ..handler import BaseHandler, route
-
-
-class TestIntegration(unittest.TestCase):
-    def test_wsgi_app(self):
-        # Given a Handler class that implements a route
-        some_plugin = MagicMock()
-
-        class MyHandler(BaseHandler):
-            url_base = '/42'
-            plugins = [some_plugin]
-
-            @route('get', '/some/path')
-            def callback(self):
-                return 'some content'
-
-        route_mock = MagicMock()
-        with patch(handler.__name__+'.Bottle.route', route_mock):
-            # When we try to access wsgi_app for the first time
-            my_handler = MyHandler()
-            assert my_handler.wsgi_app
-            # then bottle.route should be called with the right arguments
-            route_mock.assert_called_once_with('/42/some/path', method='GET',
-                                               callback=my_handler.callback,
-                                               apply=[some_plugin])
-
-            # When we try to access wsgi_app for the second time
-            assert my_handler.wsgi_app
-            # then the result should be cached
-            # and bottle.route should not be called again
-            self.assertEqual(route_mock.call_count, 1)
-
-    def test_route_created(self):
-        # Given a Handler class, as in the example documentation
-        class MyHandler(BaseHandler):
-            def __init__(self):
-                self.value = 42
-
-            @route('GET', '/some/path/<param>')
-            def callback(self, param):
-                return '{} + {}'.format(self.value, param)
-
-        # when this class is used to generate a webapp
-        app = TestApp(MyHandler().wsgi_app)
-
-        # then the defined URLs should be available
-        response = app.get('/some/path/0')
-        self.assertEqual(response.status_code, 200)
-        # and the callbacks should have access to ``self``
-        response.mustcontain('42 + 0')
-
-    def test_url_base(self):
-        # Given a Handler class that allows url_base customization
-        class MyHandler(BaseHandler):
-            def __init__(self, url_base):
-                self.url_base = url_base
-
-            @route('GET', '/some/path/<param>')
-            def callback(self, param):
-                return param
-
-        # when this class is used to generate a webapp
-        app = TestApp(MyHandler('/prefix').wsgi_app)
-
-        # then the prefixed URLs should be available
-        response = app.get('/prefix/some/path/content')
-        self.assertEqual(response.status_code, 200)
-        response.mustcontain('content')
-
-    def test_starting_param(self):
-        # Given a Handler class with a route beginning with a param
-        class MyHandler(BaseHandler):
-            @route('GET', '/<param>/some/path')
-            def callback(self, param):
-                return '**{}**'.format(param)
-
-        # is used to generate a webapp
-        app = TestApp(MyHandler().wsgi_app)
-
-        # when the defined URLs is accessed
-        response = app.get('/42/some/path')
-        # Then no error should happen
-        self.assertEqual(response.status_code, 200)
-        response.mustcontain('**42**')
-
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/osm_ro/http_tools/tox.ini b/osm_ro/http_tools/tox.ini
deleted file mode 100644 (file)
index 43055c2..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-# This tox file allows the devs to run unit tests only for this subpackage.
-# In order to do so, cd into the directory and run `tox`
-
-[tox]
-minversion = 1.8
-envlist = py27,py36,flake8,radon
-skipsdist = True
-
-[testenv]
-changedir = {toxinidir}
-commands =
-    nosetests -d --with-coverage --cover-package=. {posargs:tests}
-deps =
-    WebTest
-    bottle
-    coverage
-    mock
-    nose
-    six
-    PyYaml
-
-[testenv:flake8]
-changedir = {toxinidir}
-deps = flake8
-commands = flake8 {posargs:.}
-
-[testenv:radon]
-changedir = {toxinidir}
-deps = radon
-commands =
-    radon cc --show-complexity --total-average {posargs:.}
-    radon mi -s {posargs:.}
-
-[coverage:run]
-branch = True
-source = {toxinidir}
-omit =
-    tests
-    tests/*
-    */test_*
-    .tox/*
-
-[coverage:report]
-show_missing = True
-
-[flake8]
-exclude =
-    request_processing.py
-    .tox
diff --git a/osm_ro/httpserver.py b/osm_ro/httpserver.py
deleted file mode 100644 (file)
index 613fb08..0000000
+++ /dev/null
@@ -1,1533 +0,0 @@
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-'''
-HTTP server implementing the openmano API. It will answer to POST, PUT, GET methods in the appropriate URLs
-and will use the nfvo.py module to run the appropriate method.
-Every YAML/JSON file is checked against a schema in openmano_schemas.py module.
-'''
-__author__="Alfonso Tierno, Gerardo Garcia"
-__date__ ="$17-sep-2014 09:07:15$"
-
-import bottle
-import yaml
-import threading
-import logging
-
-from openmano_schemas import vnfd_schema_v01, vnfd_schema_v02, \
-                            nsd_schema_v01, nsd_schema_v02, nsd_schema_v03, scenario_edit_schema, \
-                            scenario_action_schema, instance_scenario_action_schema, instance_scenario_create_schema_v01, \
-                            tenant_schema, tenant_edit_schema,\
-                            datacenter_schema, datacenter_edit_schema, datacenter_action_schema, datacenter_associate_schema,\
-                            object_schema, netmap_new_schema, netmap_edit_schema, sdn_controller_schema, sdn_controller_edit_schema, \
-                            sdn_port_mapping_schema, sdn_external_port_schema
-
-from .http_tools import errors as httperrors
-from .http_tools.request_processing import (
-    format_out,
-    format_in,
-    filter_query_string
-)
-from .wim.http_handler import WimHandler
-
-import nfvo
-import utils
-from db_base import db_base_Exception
-from functools import wraps
-
-global mydb
-global url_base
-global logger
-url_base="/openmano"
-logger = None
-
-
-def log_to_logger(fn):
-    '''
-    Wrap a Bottle request so that a log line is emitted after it's handled.
-    (This decorator can be extended to take the desired logger as a param.)
-    '''
-    @wraps(fn)
-    def _log_to_logger(*args, **kwargs):
-        actual_response = fn(*args, **kwargs)
-        # modify this to log exactly what you need:
-        logger.info('FROM %s %s %s %s', bottle.request.remote_addr,
-                                        bottle.request.method,
-                                        bottle.request.url,
-                                        bottle.response.status)
-        return actual_response
-    return _log_to_logger
-
-class httpserver(threading.Thread):
-    def __init__(self, db, admin=False, host='localhost', port=9090,
-                 wim_persistence=None, wim_engine=None):
-        #global url_base
-        global mydb
-        global logger
-        #initialization
-        if not logger:
-            logger = logging.getLogger('openmano.http')
-        threading.Thread.__init__(self)
-        self.host = host
-        self.port = port   #Port where the listen service must be started
-        if admin==True:
-            self.name = "http_admin"
-        else:
-            self.name = "http"
-            #self.url_preffix = 'http://' + host + ':' + str(port) + url_base
-            mydb = db
-        #self.first_usable_connection_index = 10
-        #self.next_connection_index = self.first_usable_connection_index #The next connection index to be used
-        #Ensure that when the main program exits the thread will also exit
-
-        self.handlers = [
-            WimHandler(db, wim_persistence, wim_engine, url_base)
-        ]
-
-        self.daemon = True
-        self.setDaemon(True)
-
-    def run(self, debug=False, quiet=True):
-        bottle.install(log_to_logger)
-        default_app = bottle.app()
-
-        for handler in self.handlers:
-            default_app.merge(handler.wsgi_app)
-
-        bottle.run(host=self.host, port=self.port, debug=debug, quiet=quiet)
-
-
-def run_bottle(db, host_='localhost', port_=9090):
-    '''Used for launching in main thread, so that it can be debugged'''
-    server = httpserver(db, host=host_, port=port_)
-    server.run(debug=True)  # quiet=True
-
-
-@bottle.route(url_base + '/', method='GET')
-def http_get():
-    #print
-    return 'works' #TODO: to be completed
-
-@bottle.hook('after_request')
-def enable_cors():
-    '''Don't know yet if really needed. Keep it just in case'''
-    bottle.response.headers['Access-Control-Allow-Origin'] = '*'
-
-@bottle.route(url_base + '/version', method='GET')
-def http_get_version():
-    return nfvo.get_version()
-#
-# VNFs
-#
-
-@bottle.route(url_base + '/tenants', method='GET')
-def http_get_tenants():
-    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
-    select_,where_,limit_ = filter_query_string(bottle.request.query, None,
-            ('uuid','name','description','created_at') )
-    try:
-        tenants = mydb.get_rows(FROM='nfvo_tenants', SELECT=select_,WHERE=where_,LIMIT=limit_)
-        #change_keys_http2db(content, http2db_tenant, reverse=True)
-        utils.convert_float_timestamp2str(tenants)
-        data={'tenants' : tenants}
-        return format_out(data)
-    except bottle.HTTPError:
-        raise
-    except db_base_Exception as e:
-        logger.error("http_get_tenants error {}: {}".format(e.http_code, str(e)))
-        bottle.abort(e.http_code, str(e))
-    except Exception as e:
-        logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
-
-
-@bottle.route(url_base + '/tenants/<tenant_id>', method='GET')
-def http_get_tenant_id(tenant_id):
-    '''get tenant details, can use both uuid or name'''
-    #obtain data
-    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
-    try:
-        from_ = 'nfvo_tenants'
-        select_, where_, limit_ = filter_query_string(bottle.request.query, None,
-                                                      ('uuid', 'name', 'description', 'created_at'))
-        what = 'uuid' if utils.check_valid_uuid(tenant_id) else 'name'
-        where_[what] = tenant_id
-        tenants = mydb.get_rows(FROM=from_, SELECT=select_,WHERE=where_)
-        #change_keys_http2db(content, http2db_tenant, reverse=True)
-        if len(tenants) == 0:
-            bottle.abort(httperrors.Not_Found, "No tenant found with {}='{}'".format(what, tenant_id))
-        elif len(tenants) > 1:
-            bottle.abort(httperrors.Bad_Request, "More than one tenant found with {}='{}'".format(what, tenant_id))
-        utils.convert_float_timestamp2str(tenants[0])
-        data = {'tenant': tenants[0]}
-        return format_out(data)
-    except bottle.HTTPError:
-        raise
-    except db_base_Exception as e:
-        logger.error("http_get_tenant_id error {}: {}".format(e.http_code, str(e)))
-        bottle.abort(e.http_code, str(e))
-    except Exception as e:
-        logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
-
-
-@bottle.route(url_base + '/tenants', method='POST')
-def http_post_tenants():
-    '''insert a tenant into the catalogue. '''
-    #parse input data
-    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
-    http_content,_ = format_in( tenant_schema )
-    r = utils.remove_extra_items(http_content, tenant_schema)
-    if r:
-        logger.debug("Remove received extra items %s", str(r))
-    try:
-        data = nfvo.new_tenant(mydb, http_content['tenant'])
-        return http_get_tenant_id(data)
-    except bottle.HTTPError:
-        raise
-    except (nfvo.NfvoException, db_base_Exception) as e:
-        logger.error("http_post_tenants error {}: {}".format(e.http_code, str(e)))
-        bottle.abort(e.http_code, str(e))
-    except Exception as e:
-        logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
-
-
-@bottle.route(url_base + '/tenants/<tenant_id>', method='PUT')
-def http_edit_tenant_id(tenant_id):
-    '''edit tenant details, can use both uuid or name'''
-    #parse input data
-    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
-    http_content,_ = format_in( tenant_edit_schema )
-    r = utils.remove_extra_items(http_content, tenant_edit_schema)
-    if r:
-        logger.debug("Remove received extra items %s", str(r))
-
-    #obtain data, check that only one exist
-    try:
-        tenant = mydb.get_table_by_uuid_name('nfvo_tenants', tenant_id)
-        #edit data
-        tenant_id = tenant['uuid']
-        where={'uuid': tenant['uuid']}
-        mydb.update_rows('nfvo_tenants', http_content['tenant'], where)
-        return http_get_tenant_id(tenant_id)
-    except bottle.HTTPError:
-        raise
-    except db_base_Exception as e:
-        logger.error("http_edit_tenant_id error {}: {}".format(e.http_code, str(e)))
-        bottle.abort(e.http_code, str(e))
-    except Exception as e:
-        logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
-
-
-@bottle.route(url_base + '/tenants/<tenant_id>', method='DELETE')
-def http_delete_tenant_id(tenant_id):
-    '''delete a tenant from database, can use both uuid or name'''
-    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
-    try:
-        data = nfvo.delete_tenant(mydb, tenant_id)
-        return format_out({"result":"tenant " + data + " deleted"})
-    except bottle.HTTPError:
-        raise
-    except db_base_Exception as e:
-        logger.error("http_delete_tenant_id error {}: {}".format(e.http_code, str(e)))
-        bottle.abort(e.http_code, str(e))
-    except Exception as e:
-        logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
-
-
-@bottle.route(url_base + '/<tenant_id>/datacenters', method='GET')
-def http_get_datacenters(tenant_id):
-    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
-    try:
-        if tenant_id != 'any':
-            #check valid tenant_id
-            nfvo.check_tenant(mydb, tenant_id)
-        select_,where_,limit_ = filter_query_string(bottle.request.query, None,
-                ('uuid','name','vim_url','type','created_at') )
-        if tenant_id != 'any':
-            where_['nfvo_tenant_id'] = tenant_id
-            if 'created_at' in select_:
-                select_[ select_.index('created_at') ] = 'd.created_at as created_at'
-            if 'created_at' in where_:
-                where_['d.created_at'] = where_.pop('created_at')
-            datacenters = mydb.get_rows(FROM='datacenters as d join tenants_datacenters as td on d.uuid=td.datacenter_id',
-                                          SELECT=select_,WHERE=where_,LIMIT=limit_)
-        else:
-            datacenters = mydb.get_rows(FROM='datacenters',
-                                          SELECT=select_,WHERE=where_,LIMIT=limit_)
-        #change_keys_http2db(content, http2db_tenant, reverse=True)
-        utils.convert_float_timestamp2str(datacenters)
-        data={'datacenters' : datacenters}
-        return format_out(data)
-    except bottle.HTTPError:
-        raise
-    except (nfvo.NfvoException, db_base_Exception) as e:
-        logger.error("http_get_datacenters error {}: {}".format(e.http_code, str(e)))
-        bottle.abort(e.http_code, str(e))
-    except Exception as e:
-        logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
-
-
-@bottle.route(url_base + '/<tenant_id>/vim_accounts', method='GET')
-@bottle.route(url_base + '/<tenant_id>/vim_accounts/<vim_account_id>', method='GET')
-def http_get_vim_account(tenant_id, vim_account_id=None):
-    '''get vim_account list/details, '''
-    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
-    try:
-        select_ = ('uuid', 'name', 'dt.datacenter_id as vim_id', 'vim_tenant_name', 'vim_tenant_id', 'user', 'config',
-                   'dt.created_at as created_at', 'passwd')
-        where_ = {'nfvo_tenant_id': tenant_id}
-        if vim_account_id:
-            where_['dt.uuid'] = vim_account_id
-        from_ = 'tenants_datacenters as td join datacenter_tenants as dt on dt.uuid=td.datacenter_tenant_id'
-        vim_accounts = mydb.get_rows(SELECT=select_, FROM=from_, WHERE=where_)
-
-        if len(vim_accounts) == 0 and vim_account_id:
-            bottle.abort(HTTP_Not_Found, "No vim_account found for tenant {} and id '{}'".format(tenant_id,
-                                                                                                 vim_account_id))
-        for vim_account in vim_accounts:
-                if vim_account["passwd"]:
-                    vim_account["passwd"] = "******"
-                if vim_account['config'] != None:
-                    try:
-                        config_dict = yaml.load(vim_account['config'])
-                        vim_account['config'] = config_dict
-                        if vim_account['config'].get('admin_password'):
-                            vim_account['config']['admin_password'] = "******"
-                        if vim_account['config'].get('vcenter_password'):
-                            vim_account['config']['vcenter_password'] = "******"
-                        if vim_account['config'].get('nsx_password'):
-                            vim_account['config']['nsx_password'] = "******"
-                    except Exception as e:
-                        logger.error("Exception '%s' while trying to load config information", str(e))
-        # change_keys_http2db(content, http2db_datacenter, reverse=True)
-        #convert_datetime2str(vim_account)
-        if vim_account_id:
-            return format_out({"datacenter": vim_accounts[0]})
-        else:
-            return format_out({"datacenters": vim_accounts})
-    except bottle.HTTPError:
-        raise
-    except (nfvo.NfvoException, db_base_Exception) as e:
-        logger.error("http_get_datacenter_id error {}: {}".format(e.http_code, str(e)))
-        bottle.abort(e.http_code, str(e))
-    except Exception as e:
-        logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
-
-
-@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>', method='GET')
-def http_get_datacenter_id(tenant_id, datacenter_id):
-    '''get datacenter details, can use both uuid or name'''
-    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
-    try:
-        if tenant_id != 'any':
-            #check valid tenant_id
-            nfvo.check_tenant(mydb, tenant_id)
-        #obtain data
-        what = 'uuid' if utils.check_valid_uuid(datacenter_id) else 'name'
-        where_={}
-        where_[what] = datacenter_id
-        select_=['uuid', 'name','vim_url', 'vim_url_admin', 'type', 'd.config as config', 'description', 'd.created_at as created_at']
-        if tenant_id != 'any':
-            select_.append("datacenter_tenant_id")
-            where_['td.nfvo_tenant_id']= tenant_id
-            from_='datacenters as d join tenants_datacenters as td on d.uuid=td.datacenter_id'
-        else:
-            from_='datacenters as d'
-        datacenters = mydb.get_rows(
-                    SELECT=select_,
-                    FROM=from_,
-                    WHERE=where_)
-
-        if len(datacenters)==0:
-            bottle.abort( httperrors.Not_Found, "No datacenter found for tenant with {} '{}'".format(what, datacenter_id) )
-        elif len(datacenters)>1:
-            bottle.abort( httperrors.Bad_Request, "More than one datacenter found for tenant with {} '{}'".format(what, datacenter_id) )
-        datacenter = datacenters[0]
-        if tenant_id != 'any':
-            #get vim tenant info
-            vim_tenants = mydb.get_rows(
-                    SELECT=("vim_tenant_name", "vim_tenant_id", "user", "passwd", "config"),
-                    FROM="datacenter_tenants",
-                    WHERE={"uuid": datacenters[0]["datacenter_tenant_id"]},
-                    ORDER_BY=("created", ) )
-            del datacenter["datacenter_tenant_id"]
-            datacenter["vim_tenants"] = vim_tenants
-            for vim_tenant in vim_tenants:
-                if vim_tenant["passwd"]:
-                    vim_tenant["passwd"] = "******"
-                if vim_tenant['config'] != None:
-                    try:
-                        config_dict = yaml.load(vim_tenant['config'])
-                        vim_tenant['config'] = config_dict
-                        if vim_tenant['config'].get('admin_password'):
-                            vim_tenant['config']['admin_password'] = "******"
-                        if vim_tenant['config'].get('vcenter_password'):
-                            vim_tenant['config']['vcenter_password'] = "******"
-                        if vim_tenant['config'].get('nsx_password'):
-                            vim_tenant['config']['nsx_password'] = "******"
-                    except Exception as e:
-                        logger.error("Exception '%s' while trying to load config information", str(e))
-
-        if datacenter['config'] != None:
-            try:
-                config_dict = yaml.load(datacenter['config'])
-                datacenter['config'] = config_dict
-                if datacenter['config'].get('admin_password'):
-                    datacenter['config']['admin_password'] = "******"
-                if datacenter['config'].get('vcenter_password'):
-                    datacenter['config']['vcenter_password'] = "******"
-                if datacenter['config'].get('nsx_password'):
-                    datacenter['config']['nsx_password'] = "******"
-            except Exception as e:
-                logger.error("Exception '%s' while trying to load config information", str(e))
-        #change_keys_http2db(content, http2db_datacenter, reverse=True)
-        utils.convert_float_timestamp2str(datacenter)
-        data={'datacenter' : datacenter}
-        return format_out(data)
-    except bottle.HTTPError:
-        raise
-    except (nfvo.NfvoException, db_base_Exception) as e:
-        logger.error("http_get_datacenter_id error {}: {}".format(e.http_code, str(e)))
-        bottle.abort(e.http_code, str(e))
-    except Exception as e:
-        logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
-
-
-@bottle.route(url_base + '/datacenters', method='POST')
-def http_post_datacenters():
-    '''insert a datacenter into the catalogue. '''
-    #parse input data
-    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
-    http_content,_ = format_in(datacenter_schema, confidential_data=True)
-    r = utils.remove_extra_items(http_content, datacenter_schema)
-    if r:
-        logger.debug("Remove received extra items %s", str(r))
-    try:
-        data = nfvo.new_datacenter(mydb, http_content['datacenter'])
-        return http_get_datacenter_id('any', data)
-    except bottle.HTTPError:
-        raise
-    except (nfvo.NfvoException, db_base_Exception) as e:
-        logger.error("http_post_datacenters error {}: {}".format(e.http_code, str(e)))
-        bottle.abort(e.http_code, str(e))
-    except Exception as e:
-        logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
-
-
-@bottle.route(url_base + '/datacenters/<datacenter_id_name>', method='PUT')
-def http_edit_datacenter_id(datacenter_id_name):
-    '''edit datacenter details, can use both uuid or name'''
-    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
-    #parse input data
-    http_content,_ = format_in( datacenter_edit_schema )
-    r = utils.remove_extra_items(http_content, datacenter_edit_schema)
-    if r:
-        logger.debug("Remove received extra items %s", str(r))
-
-    try:
-        datacenter_id = nfvo.edit_datacenter(mydb, datacenter_id_name, http_content['datacenter'])
-        return http_get_datacenter_id('any', datacenter_id)
-    except bottle.HTTPError:
-        raise
-    except (nfvo.NfvoException, db_base_Exception) as e:
-        logger.error("http_edit_datacenter_id error {}: {}".format(e.http_code, str(e)))
-        bottle.abort(e.http_code, str(e))
-    except Exception as e:
-        logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
-
-@bottle.route(url_base + '/<tenant_id>/sdn_controllers', method='POST')
-def http_post_sdn_controller(tenant_id):
-    '''insert a sdn controller into the catalogue. '''
-    #parse input data
-    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
-    http_content,_ = format_in( sdn_controller_schema )
-    try:
-        logger.debug("tenant_id: "+tenant_id)
-        #logger.debug("content: {}".format(http_content['sdn_controller']))
-
-        data = nfvo.sdn_controller_create(mydb, tenant_id, http_content['sdn_controller'])
-        return format_out({"sdn_controller": nfvo.sdn_controller_list(mydb, tenant_id, data)})
-    except bottle.HTTPError:
-        raise
-    except (nfvo.NfvoException, db_base_Exception) as e:
-        logger.error("http_post_sdn_controller error {}: {}".format(e.http_code, str(e)))
-        bottle.abort(e.http_code, str(e))
-    except Exception as e:
-        logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
-
-@bottle.route(url_base + '/<tenant_id>/sdn_controllers/<controller_id>', method='PUT')
-def http_put_sdn_controller_update(tenant_id, controller_id):
-    '''Update sdn controller'''
-    #parse input data
-    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
-    http_content,_ = format_in( sdn_controller_edit_schema )
-#    r = utils.remove_extra_items(http_content, datacenter_schema)
-#    if r:
-#        logger.debug("Remove received extra items %s", str(r))
-    try:
-        #logger.debug("tenant_id: "+tenant_id)
-        logger.debug("content: {}".format(http_content['sdn_controller']))
-
-        data = nfvo.sdn_controller_update(mydb, tenant_id, controller_id, http_content['sdn_controller'])
-        return format_out({"sdn_controller": nfvo.sdn_controller_list(mydb, tenant_id, controller_id)})
-
-    except bottle.HTTPError:
-        raise
-    except (nfvo.NfvoException, db_base_Exception) as e:
-        logger.error("http_post_sdn_controller error {}: {}".format(e.http_code, str(e)))
-        bottle.abort(e.http_code, str(e))
-    except Exception as e:
-        logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
-
-@bottle.route(url_base + '/<tenant_id>/sdn_controllers', method='GET')
-def http_get_sdn_controller(tenant_id):
-    '''get sdn controllers list, can use both uuid or name'''
-    try:
-        logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
-
-        data = {'sdn_controllers': nfvo.sdn_controller_list(mydb, tenant_id)}
-        return format_out(data)
-    except bottle.HTTPError:
-        raise
-    except (nfvo.NfvoException, db_base_Exception) as e:
-        logger.error("http_get_sdn_controller error {}: {}".format(e.http_code, str(e)))
-        bottle.abort(e.http_code, str(e))
-    except Exception as e:
-        logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
-
-@bottle.route(url_base + '/<tenant_id>/sdn_controllers/<controller_id>', method='GET')
-def http_get_sdn_controller_id(tenant_id, controller_id):
-    '''get sdn controller details, can use both uuid or name'''
-    try:
-        logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
-        data = nfvo.sdn_controller_list(mydb, tenant_id, controller_id)
-        return format_out({"sdn_controllers": data})
-    except bottle.HTTPError:
-        raise
-    except (nfvo.NfvoException, db_base_Exception) as e:
-        logger.error("http_get_sdn_controller_id error {}: {}".format(e.http_code, str(e)))
-        bottle.abort(e.http_code, str(e))
-    except Exception as e:
-        logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
-
-@bottle.route(url_base + '/<tenant_id>/sdn_controllers/<controller_id>', method='DELETE')
-def http_delete_sdn_controller_id(tenant_id, controller_id):
-    '''delete sdn controller, can use both uuid or name'''
-    try:
-        logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
-        data = nfvo.sdn_controller_delete(mydb, tenant_id, controller_id)
-        return format_out(data)
-    except bottle.HTTPError:
-        raise
-    except (nfvo.NfvoException, db_base_Exception) as e:
-        logger.error("http_delete_sdn_controller_id error {}: {}".format(e.http_code, str(e)))
-        bottle.abort(e.http_code, str(e))
-    except Exception as e:
-        logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
-
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/sdn_mapping', method='POST')
def http_post_datacenter_sdn_port_mapping(tenant_id, datacenter_id):
    """Store the SDN port mapping of a datacenter taken from the request body."""
    # parse and schema-validate the request body before touching the database
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    http_content, _ = format_in(sdn_port_mapping_schema)
    try:
        mapping = nfvo.datacenter_sdn_port_mapping_set(mydb, tenant_id, datacenter_id, http_content['sdn_port_mapping'])
        return format_out({"sdn_port_mapping": mapping})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as exc:
        logger.error("http_post_datacenter_sdn_port_mapping error {}: {}".format(exc.http_code, str(exc)))
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(exc).__name__ + ": " + str(exc))
-
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/sdn_mapping', method='GET')
def http_get_datacenter_sdn_port_mapping(tenant_id, datacenter_id):
    """Return the SDN port mapping of a datacenter; uuid or name accepted."""
    try:
        logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
        mapping = nfvo.datacenter_sdn_port_mapping_list(mydb, tenant_id, datacenter_id)
        return format_out({"sdn_port_mapping": mapping})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as exc:
        logger.error("http_get_datacenter_sdn_port_mapping error {}: {}".format(exc.http_code, str(exc)))
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(exc).__name__ + ": " + str(exc))
-
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/sdn_mapping', method='DELETE')
def http_delete_datacenter_sdn_port_mapping(tenant_id, datacenter_id):
    """Remove the SDN port mapping of a datacenter; uuid or name accepted."""
    try:
        logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
        outcome = nfvo.datacenter_sdn_port_mapping_delete(mydb, tenant_id, datacenter_id)
        return format_out({"result": outcome})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as exc:
        logger.error("http_delete_datacenter_sdn_port_mapping error {}: {}".format(exc.http_code, str(exc)))
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(exc).__name__ + ": " + str(exc))
-
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/networks', method='GET')  #deprecated
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/netmaps', method='GET')
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/netmaps/<netmap_id>', method='GET')
def http_getnetmap_datacenter_id(tenant_id, datacenter_id, netmap_id=None):
    """Get the netmaps of a datacenter (all of them, or a single one when
    <netmap_id> is supplied). Both uuid and name are accepted for the
    datacenter and for the netmap.

    :param tenant_id: tenant the request is done on behalf of (not filtered on here)
    :param datacenter_id: uuid or name of the datacenter
    :param netmap_id: optional uuid or name of a single netmap
    :return: formatted dict {'netmap': ...} or {'netmaps': [...]}
    """
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # obtain data
    try:
        datacenter_dict = mydb.get_table_by_uuid_name('datacenters', datacenter_id, "datacenter")
        where_ = {"datacenter_id": datacenter_dict['uuid']}
        if netmap_id:
            # a netmap can be addressed by uuid or by name
            if utils.check_valid_uuid(netmap_id):
                where_["uuid"] = netmap_id
            else:
                where_["name"] = netmap_id
        netmaps = mydb.get_rows(FROM='datacenter_nets',
                                SELECT=('name','vim_net_id as vim_id', 'uuid', 'type','multipoint','shared','description', 'created_at'),
                                WHERE=where_)
        utils.convert_float_timestamp2str(netmaps)
        utils.convert_str2boolean(netmaps, ('shared', 'multipoint'))
        if netmap_id and len(netmaps) == 1:
            data = {'netmap': netmaps[0]}
        elif netmap_id and len(netmaps) == 0:
            # FIX: dict.iteritems() does not exist in python3; use items().
            # bottle.abort raises HTTPError, so no return is needed after it.
            bottle.abort(httperrors.Not_Found, "No netmap found with " + " and ".join(
                map(lambda x: str(x[0]) + ": " + str(x[1]), where_.items())))
        else:
            data = {'netmaps': netmaps}
        return format_out(data)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        # FIX: log label previously said "http_getnetwork_datacenter_id"
        logger.error("http_getnetmap_datacenter_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
-
-
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/netmaps', method='DELETE')
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/netmaps/<netmap_id>', method='DELETE')
def http_delnetmap_datacenter_id(tenant_id, datacenter_id, netmap_id=None):
    """Delete the netmaps of a datacenter (all of them, or a single one when
    <netmap_id> is supplied). Both uuid and name are accepted.

    :param tenant_id: tenant the request is done on behalf of (not filtered on here)
    :param datacenter_id: uuid or name of the datacenter
    :param netmap_id: optional uuid or name of a single netmap
    :return: formatted {'result': ...} message with what was deleted
    """
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # obtain data
    try:
        datacenter_dict = mydb.get_table_by_uuid_name('datacenters', datacenter_id, "datacenter")
        where_ = {"datacenter_id": datacenter_dict['uuid']}
        if netmap_id:
            # a netmap can be addressed by uuid or by name
            if utils.check_valid_uuid(netmap_id):
                where_["uuid"] = netmap_id
            else:
                where_["name"] = netmap_id
        #change_keys_http2db(content, http2db_tenant, reverse=True)
        deleted = mydb.delete_row(FROM='datacenter_nets', WHERE=where_)
        if deleted == 0 and netmap_id:
            # FIX: dict.iteritems() does not exist in python3; use items()
            bottle.abort(httperrors.Not_Found, "No netmap found with " + " and ".join(
                map(lambda x: str(x[0]) + ": " + str(x[1]), where_.items())))
        if netmap_id:
            return format_out({"result": "netmap %s deleted" % netmap_id})
        else:
            return format_out({"result": "%d netmap deleted" % deleted})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_delnetmap_datacenter_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
-
-
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/netmaps/upload', method='POST')
def http_uploadnetmap_datacenter_id(tenant_id, datacenter_id):
    """Import all VIM networks of a datacenter as netmaps and return them."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        # passing None makes nfvo discover the networks directly from the VIM
        netmaps = nfvo.datacenter_new_netmap(mydb, tenant_id, datacenter_id, None)
        utils.convert_float_timestamp2str(netmaps)
        utils.convert_str2boolean(netmaps, ('shared', 'multipoint'))
        return format_out({'netmaps': netmaps})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as exc:
        logger.error("http_uploadnetmap_datacenter_id error {}: {}".format(exc.http_code, str(exc)))
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(exc).__name__ + ": " + str(exc))
-
-
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/netmaps', method='POST')
def http_postnetmap_datacenter_id(tenant_id, datacenter_id):
    """Create a new netmap for a datacenter from the request body."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # parse input data and strip anything not in the schema
    http_content, _ = format_in(netmap_new_schema)
    extra = utils.remove_extra_items(http_content, netmap_new_schema)
    if extra:
        logger.debug("Remove received extra items %s", str(extra))
    try:
        # obtain data, check that only one exist
        netmaps = nfvo.datacenter_new_netmap(mydb, tenant_id, datacenter_id, http_content)
        utils.convert_float_timestamp2str(netmaps)
        utils.convert_str2boolean(netmaps, ('shared', 'multipoint'))
        return format_out({'netmaps': netmaps})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as exc:
        logger.error("http_postnetmap_datacenter_id error {}: {}".format(exc.http_code, str(exc)))
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(exc).__name__ + ": " + str(exc))
-
-
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/netmaps/<netmap_id>', method='PUT')
def http_putnettmap_datacenter_id(tenant_id, datacenter_id, netmap_id):
    """Edit an existing netmap and return its updated representation."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # parse input data and strip anything not in the schema
    http_content, _ = format_in(netmap_edit_schema)
    extra = utils.remove_extra_items(http_content, netmap_edit_schema)
    if extra:
        logger.debug("Remove received extra items %s", str(extra))
    try:
        # delegate the edit, then reuse the GET handler to build the response
        nfvo.datacenter_edit_netmap(mydb, tenant_id, datacenter_id, netmap_id, http_content)
        return http_getnetmap_datacenter_id(tenant_id, datacenter_id, netmap_id)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as exc:
        logger.error("http_putnettmap_datacenter_id error {}: {}".format(exc.http_code, str(exc)))
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(exc).__name__ + ": " + str(exc))
-
-
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/action', method='POST')
def http_action_datacenter_id(tenant_id, datacenter_id):
    """Perform an action over a datacenter; uuid or name accepted.

    :param tenant_id: tenant the action is executed for
    :param datacenter_id: uuid or name of the datacenter
    :return: the action result, or the refreshed netmap list for 'net-update'
    """
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # parse input data and strip anything not in the schema
    http_content, _ = format_in(datacenter_action_schema)
    r = utils.remove_extra_items(http_content, datacenter_action_schema)
    if r:
        logger.debug("Remove received extra items %s", str(r))
    try:
        # obtain data, check that only one exist
        result = nfvo.datacenter_action(mydb, tenant_id, datacenter_id, http_content)
        if 'net-update' in http_content:
            # FIX: http_getnetmap_datacenter_id requires (tenant_id, datacenter_id);
            # the previous single-argument call raised TypeError at runtime
            return http_getnetmap_datacenter_id(tenant_id, datacenter_id)
        else:
            return format_out(result)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_action_datacenter_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
-
-
@bottle.route(url_base + '/datacenters/<datacenter_id>', method='DELETE')
def http_delete_datacenter_id(datacenter_id):
    """Delete a datacenter from the database; uuid or name accepted."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        name = nfvo.delete_datacenter(mydb, datacenter_id)
        return format_out({"result": "datacenter '" + name + "' deleted"})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as exc:
        logger.error("http_delete_datacenter_id error {}: {}".format(exc.http_code, str(exc)))
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(exc).__name__ + ": " + str(exc))
-
-
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>', method='POST')
@bottle.route(url_base + '/<tenant_id>/vim_accounts', method='POST')
def http_associate_datacenters(tenant_id, datacenter_id=None):
    """Associate an existing datacenter to this tenant (create a vim account)."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # parse input data; credentials may be present, so mark them confidential
    http_content, _ = format_in(datacenter_associate_schema, confidential_data=True)
    extra = utils.remove_extra_items(http_content, datacenter_associate_schema)
    if extra:
        logger.debug("Remove received extra items %s", str(extra))
    try:
        vim_account_id = nfvo.create_vim_account(mydb, tenant_id, datacenter_id,
                                                 **http_content['datacenter'])
        # respond with the freshly created vim account details
        return http_get_vim_account(tenant_id, vim_account_id)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as exc:
        logger.error("http_associate_datacenters error {}: {}".format(exc.http_code, str(exc)))
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(exc).__name__ + ": " + str(exc))
-
@bottle.route(url_base + '/<tenant_id>/vim_accounts/<vim_account_id>', method='PUT')
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>', method='PUT')
def http_vim_account_edit(tenant_id, vim_account_id=None, datacenter_id=None):
    """Edit the vim account that associates a datacenter to this tenant."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # parse input data and strip anything not in the schema
    http_content, _ = format_in(datacenter_associate_schema)
    extra = utils.remove_extra_items(http_content, datacenter_associate_schema)
    if extra:
        logger.debug("Remove received extra items %s", str(extra))
    try:
        vim_account_id = nfvo.edit_vim_account(mydb, tenant_id, vim_account_id, datacenter_id=datacenter_id,
                                               **http_content['datacenter'])
        # respond with the updated vim account details
        return http_get_vim_account(tenant_id, vim_account_id)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as exc:
        logger.error("http_vim_account_edit error {}: {}".format(exc.http_code, str(exc)))
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(exc).__name__ + ": " + str(exc))
-
-
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>', method='DELETE')
@bottle.route(url_base + '/<tenant_id>/vim_accounts/<vim_account_id>', method='DELETE')
def http_deassociate_datacenters(tenant_id, datacenter_id=None, vim_account_id=None):
    """De-associate a datacenter from this tenant (delete the vim account)."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        outcome = nfvo.delete_vim_account(mydb, tenant_id, vim_account_id, datacenter_id)
        return format_out({"result": outcome})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as exc:
        logger.error("http_deassociate_datacenters error {}: {}".format(exc.http_code, str(exc)))
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(exc).__name__ + ": " + str(exc))
-
@bottle.route(url_base + '/<tenant_id>/vim/<datacenter_id>/network/<network_id>/attach', method='POST')
def http_post_vim_net_sdn_attach(tenant_id, datacenter_id, network_id):
    """Attach an external SDN port to a VIM network."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # parse and schema-validate the external port description
    http_content, _ = format_in(sdn_external_port_schema)
    try:
        attached = nfvo.vim_net_sdn_attach(mydb, tenant_id, datacenter_id, network_id, http_content)
        return format_out(attached)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as exc:
        logger.error("http_post_vim_net_sdn_attach error {}: {}".format(exc.http_code, str(exc)))
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(exc).__name__ + ": " + str(exc))
-
@bottle.route(url_base + '/<tenant_id>/vim/<datacenter_id>/network/<network_id>/detach', method='DELETE')
@bottle.route(url_base + '/<tenant_id>/vim/<datacenter_id>/network/<network_id>/detach/<port_id>', method='DELETE')
def http_delete_vim_net_sdn_detach(tenant_id, datacenter_id, network_id, port_id=None):
    """Detach one external SDN port (or all of them when <port_id> is omitted)."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        detached = nfvo.vim_net_sdn_detach(mydb, tenant_id, datacenter_id, network_id, port_id)
        return format_out(detached)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as exc:
        logger.error("http_delete_vim_net_sdn_detach error {}: {}".format(exc.http_code, str(exc)))
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(exc).__name__ + ": " + str(exc))
-
@bottle.route(url_base + '/<tenant_id>/vim/<datacenter_id>/<item>', method='GET')
@bottle.route(url_base + '/<tenant_id>/vim/<datacenter_id>/<item>/<name>', method='GET')
def http_get_vim_items(tenant_id, datacenter_id, item, name=None):
    """List VIM items of a given kind, or show one when <name> is supplied."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        items = nfvo.vim_action_get(mydb, tenant_id, datacenter_id, item, name)
        return format_out(items)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as exc:
        logger.error("http_get_vim_items error {}: {}".format(exc.http_code, str(exc)))
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(exc).__name__ + ": " + str(exc))
-
-
@bottle.route(url_base + '/<tenant_id>/vim/<datacenter_id>/<item>/<name>', method='DELETE')
def http_del_vim_items(tenant_id, datacenter_id, item, name):
    """Delete one VIM item of a given kind, identified by <name>."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        outcome = nfvo.vim_action_delete(mydb, tenant_id, datacenter_id, item, name)
        return format_out({"result": outcome})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as exc:
        logger.error("http_del_vim_items error {}: {}".format(exc.http_code, str(exc)))
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(exc).__name__ + ": " + str(exc))
-
-
@bottle.route(url_base + '/<tenant_id>/vim/<datacenter_id>/<item>', method='POST')
def http_post_vim_items(tenant_id, datacenter_id, item):
    """Create a VIM item of a given kind from the request body."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # the body is free-form here; only generic object validation is applied
    http_content, _ = format_in(object_schema)
    try:
        created = nfvo.vim_action_create(mydb, tenant_id, datacenter_id, item, http_content)
        return format_out(created)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as exc:
        logger.error("http_post_vim_items error {}: {}".format(exc.http_code, str(exc)))
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(exc).__name__ + ": " + str(exc))
-
-
@bottle.route(url_base + '/<tenant_id>/vnfs', method='GET')
def http_get_vnfs(tenant_id):
    """List the VNFs visible to a tenant ('any' skips the tenant filter)."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        if tenant_id != 'any':
            #check valid tenant_id
            nfvo.check_tenant(mydb, tenant_id)
        select_, where_, limit_ = filter_query_string(
            bottle.request.query, None,
            ('uuid', 'name', 'osm_id', 'description', 'public', "tenant_id", "created_at"))
        if tenant_id != "any":
            # a tenant sees its own VNFs plus the public ones
            where_["OR"] = {"tenant_id": tenant_id, "public": True}
        vnfs = mydb.get_rows(FROM='vnfs', SELECT=select_, WHERE=where_, LIMIT=limit_)
        utils.convert_str2boolean(vnfs, ('public',))
        utils.convert_float_timestamp2str(vnfs)
        return format_out({'vnfs': vnfs})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as exc:
        logger.error("http_get_vnfs error {}: {}".format(exc.http_code, str(exc)))
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(exc).__name__ + ": " + str(exc))
-
-
@bottle.route(url_base + '/<tenant_id>/vnfs/<vnf_id>', method='GET')
def http_get_vnf_id(tenant_id, vnf_id):
    """Return the details of one VNF; uuid or name accepted."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        vnf = nfvo.get_vnf_id(mydb, tenant_id, vnf_id)
        # normalize db representations for the HTTP payload
        utils.convert_str2boolean(vnf, ('public',))
        utils.convert_float_timestamp2str(vnf)
        return format_out(vnf)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as exc:
        logger.error("http_get_vnf_id error {}: {}".format(exc.http_code, str(exc)))
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(exc).__name__ + ": " + str(exc))
-
-
@bottle.route(url_base + '/<tenant_id>/vnfs', method='POST')
def http_post_vnfs(tenant_id):
    """Insert a VNF into the catalogue, creating flavors/images and db rows.

    :param tenant_id: tenant that this vnf belongs to
    :return: the detailed VNF, as produced by http_get_vnf_id
    """
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # parse the body; the schema_version field selects which schema applies
    http_content, used_schema = format_in(vnfd_schema_v01, ("schema_version",), {"0.2": vnfd_schema_v02})
    extra = utils.remove_extra_items(http_content, used_schema)
    if extra:
        logger.debug("Remove received extra items %s", str(extra))
    try:
        # dispatch on the schema version that matched
        if used_schema == vnfd_schema_v01:
            vnf_id = nfvo.new_vnf(mydb, tenant_id, http_content)
        elif used_schema == vnfd_schema_v02:
            vnf_id = nfvo.new_vnf_v02(mydb, tenant_id, http_content)
        else:
            logger.warning('Unexpected schema_version: %s', http_content.get("schema_version"))
            bottle.abort(httperrors.Bad_Request, "Invalid schema version")
        return http_get_vnf_id(tenant_id, vnf_id)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as exc:
        logger.error("http_post_vnfs error {}: {}".format(exc.http_code, str(exc)))
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(exc).__name__ + ": " + str(exc))
-
-
@bottle.route(url_base + '/v3/<tenant_id>/vnfd', method='POST')
def http_post_vnfs_v3(tenant_id):
    """
    Insert one or several VNFs in the catalog, following OSM IM
    :param tenant_id: tenant owner of the VNF
    :return: The detailed list of inserted VNFs, following the old format
    """
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    http_content, _ = format_in(None)
    try:
        vnfd_uuid_list = nfvo.new_vnfd_v3(mydb, tenant_id, http_content)
        vnfd_list = []
        # fetch each inserted VNF back so the response uses the old format
        for vnfd_uuid in vnfd_uuid_list:
            vnf = nfvo.get_vnf_id(mydb, tenant_id, vnfd_uuid)
            utils.convert_str2boolean(vnf, ('public',))
            utils.convert_float_timestamp2str(vnf)
            vnfd_list.append(vnf["vnf"])
        return format_out({"vnfd": vnfd_list})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        # FIX: log label previously said "http_post_vnfs", the wrong handler name
        logger.error("http_post_vnfs_v3 error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
-
@bottle.route(url_base + '/<tenant_id>/vnfs/<vnf_id>', method='DELETE')
def http_delete_vnf_id(tenant_id, vnf_id):
    """Delete a VNF (with its VIM images/flavors when appropriate); uuid or name accepted."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        # nfvo validates the tenant and cascades the deletion
        name = nfvo.delete_vnf(mydb, tenant_id, vnf_id)
        return format_out({"result": "VNF " + name + " deleted"})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as exc:
        logger.error("http_delete_vnf_id error {}: {}".format(exc.http_code, str(exc)))
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(exc).__name__ + ": " + str(exc))
-
-
-#@bottle.route(url_base + '/<tenant_id>/hosts/topology', method='GET')
-#@bottle.route(url_base + '/<tenant_id>/physicalview/Madrid-Alcantara', method='GET')
@bottle.route(url_base + '/<tenant_id>/physicalview/<datacenter>', method='GET')
def http_get_hosts(tenant_id, datacenter):
    """Return the host topology of the VIM for the physical view."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        if datacenter == 'treeview':
            hosts = nfvo.get_hosts(mydb, tenant_id)
        else:
            # openmano-gui sends a hardcoded datacenter value, which is ignored
            result, hosts = nfvo.get_hosts_info(mydb, tenant_id)  # , datacenter)
            # legacy convention: a negative result carries the HTTP error code
            if result < 0:
                bottle.abort(-result, hosts)
        utils.convert_float_timestamp2str(hosts)
        return format_out(hosts)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as exc:
        logger.error("http_get_hosts error {}: {}".format(exc.http_code, str(exc)))
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(exc).__name__ + ": " + str(exc))
-
-
@bottle.route(url_base + '/<path:path>', method='OPTIONS')
def http_options_deploy(path):
    """Answer CORS preflight OPTIONS requests issued by the GUI."""
    # TODO: check correct path, and correct headers request
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    for header, value in (
            ('Access-Control-Allow-Methods', 'POST, GET, PUT, DELETE, OPTIONS'),
            ('Accept', 'application/yaml,application/json'),
            ('Content-Type', 'application/yaml,application/json'),
            ('Access-Control-Allow-Headers', 'content-type'),
            ('Access-Control-Allow-Origin', '*')):
        bottle.response.set_header(header, value)
    return
-
@bottle.route(url_base + '/<tenant_id>/topology/deploy', method='POST')
def http_post_deploy(tenant_id):
    """Create a scenario from the posted topology and start it immediately."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # the schema_version field selects which NSD schema applies
    http_content, used_schema = format_in(nsd_schema_v01, ("schema_version",), {2: nsd_schema_v02})
    try:
        scenario_id = nfvo.new_scenario(mydb, tenant_id, http_content)
        # launch the new scenario right away, reusing its name for the instance
        instance = nfvo.start_scenario(mydb, tenant_id, scenario_id, http_content['name'], http_content['name'])
        return format_out(instance)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as exc:
        logger.error("http_post_deploy error {}: {}".format(exc.http_code, str(exc)))
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(exc).__name__ + ": " + str(exc))
-
-
@bottle.route(url_base + '/<tenant_id>/topology/verify', method='POST')
def http_post_verify(tenant_id):
    """Stub endpoint: topology verification is not implemented yet (TODO)."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    return
-
-#
-# SCENARIOS
-#
-
@bottle.route(url_base + '/<tenant_id>/scenarios', method='POST')
def http_post_scenarios(tenant_id):
    """Add a scenario to the catalogue, building its internal db structure."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # the schema_version field selects which NSD schema applies
    http_content, used_schema = format_in(nsd_schema_v01, ("schema_version",), {2: nsd_schema_v02, "0.3": nsd_schema_v03})
    try:
        # dispatch on the schema version that matched
        if used_schema == nsd_schema_v01:
            scenario_id = nfvo.new_scenario(mydb, tenant_id, http_content)
        elif used_schema == nsd_schema_v02:
            scenario_id = nfvo.new_scenario_v02(mydb, tenant_id, http_content, "0.2")
        elif used_schema == nsd_schema_v03:
            scenario_id = nfvo.new_scenario_v02(mydb, tenant_id, http_content, "0.3")
        else:
            logger.warning('Unexpected schema_version: %s', http_content.get("schema_version"))
            bottle.abort(httperrors.Bad_Request, "Invalid schema version")
        # respond with the full scenario details
        return http_get_scenario_id(tenant_id, scenario_id)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as exc:
        logger.error("http_post_scenarios error {}: {}".format(exc.http_code, str(exc)))
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(exc).__name__ + ": " + str(exc))
-
@bottle.route(url_base + '/v3/<tenant_id>/nsd', method='POST')
def http_post_nsds_v3(tenant_id):
    """Insert one or several NSDs in the catalog, following OSM IM.

    :param tenant_id: tenant owner of the NSD
    :return: the detailed list of inserted NSDs, following the old format
    """
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    http_content, _ = format_in(None)
    try:
        nsd_uuid_list = nfvo.new_nsd_v3(mydb, tenant_id, http_content)
        # fetch each inserted scenario back so the response uses the old format
        nsd_list = []
        for nsd_uuid in nsd_uuid_list:
            scenario = mydb.get_scenario(nsd_uuid, tenant_id)
            utils.convert_float_timestamp2str(scenario)
            nsd_list.append(scenario)
        return format_out({'nsd': nsd_list})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as exc:
        logger.error("http_post_nsds_v3 error {}: {}".format(exc.http_code, str(exc)))
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(exc).__name__ + ": " + str(exc))
-
-
-@bottle.route(url_base + '/<tenant_id>/scenarios/<scenario_id>/action', method='POST')
-def http_post_scenario_action(tenant_id, scenario_id):
-    '''take an action over a scenario'''
-    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
-    # parse input data
-    http_content, _ = format_in(scenario_action_schema)
-    r = utils.remove_extra_items(http_content, scenario_action_schema)
-    if r:
-        logger.debug("Remove received extra items %s", str(r))
-    try:
-        # check valid tenant_id
-        nfvo.check_tenant(mydb, tenant_id)
-        if "start" in http_content:
-            data = nfvo.start_scenario(mydb, tenant_id, scenario_id, http_content['start']['instance_name'], \
-                        http_content['start'].get('description',http_content['start']['instance_name']),
-                        http_content['start'].get('datacenter') )
-            return format_out(data)
-        elif "deploy" in http_content:   #Equivalent to start
-            data = nfvo.start_scenario(mydb, tenant_id, scenario_id, http_content['deploy']['instance_name'],
-                        http_content['deploy'].get('description',http_content['deploy']['instance_name']),
-                        http_content['deploy'].get('datacenter') )
-            return format_out(data)
-        elif "reserve" in http_content:   #Reserve resources
-            data = nfvo.start_scenario(mydb, tenant_id, scenario_id, http_content['reserve']['instance_name'],
-                        http_content['reserve'].get('description',http_content['reserve']['instance_name']),
-                        http_content['reserve'].get('datacenter'),  startvms=False )
-            return format_out(data)
-        elif "verify" in http_content:   #Equivalent to start and then delete
-            data = nfvo.start_scenario(mydb, tenant_id, scenario_id, http_content['verify']['instance_name'],
-                        http_content['verify'].get('description',http_content['verify']['instance_name']),
-                        http_content['verify'].get('datacenter'), startvms=False )
-            instance_id = data['uuid']
-            nfvo.delete_instance(mydb, tenant_id,instance_id)
-            return format_out({"result":"Verify OK"})
-    except bottle.HTTPError:
-        raise
-    except (nfvo.NfvoException, db_base_Exception) as e:
-        logger.error("http_post_scenario_action error {}: {}".format(e.http_code, str(e)))
-        bottle.abort(e.http_code, str(e))
-    except Exception as e:
-        logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
-
-
-@bottle.route(url_base + '/<tenant_id>/scenarios', method='GET')
-def http_get_scenarios(tenant_id):
-    '''get scenarios list'''
-    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
-    try:
-        #check valid tenant_id
-        if tenant_id != "any":
-            nfvo.check_tenant(mydb, tenant_id)
-        #obtain data
-        s,w,l=filter_query_string(bottle.request.query, None,
-                                  ('uuid', 'name', 'osm_id', 'description', 'tenant_id', 'created_at', 'public'))
-        if tenant_id != "any":
-            w["OR"] = {"tenant_id": tenant_id, "public": True}
-        scenarios = mydb.get_rows(SELECT=s, WHERE=w, LIMIT=l, FROM='scenarios')
-        utils.convert_float_timestamp2str(scenarios)
-        utils.convert_str2boolean(scenarios, ('public',) )
-        data={'scenarios':scenarios}
-        #print json.dumps(scenarios, indent=4)
-        return format_out(data)
-    except bottle.HTTPError:
-        raise
-    except (nfvo.NfvoException, db_base_Exception) as e:
-        logger.error("http_get_scenarios error {}: {}".format(e.http_code, str(e)))
-        bottle.abort(e.http_code, str(e))
-    except Exception as e:
-        logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
-
-
-@bottle.route(url_base + '/<tenant_id>/scenarios/<scenario_id>', method='GET')
-def http_get_scenario_id(tenant_id, scenario_id):
-    '''get scenario details, can use both uuid or name'''
-    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
-    try:
-        #check valid tenant_id
-        if tenant_id != "any":
-            nfvo.check_tenant(mydb, tenant_id)
-        #obtain data
-        scenario = mydb.get_scenario(scenario_id, tenant_id)
-        utils.convert_float_timestamp2str(scenario)
-        data={'scenario' : scenario}
-        return format_out(data)
-    except bottle.HTTPError:
-        raise
-    except (nfvo.NfvoException, db_base_Exception) as e:
-        logger.error("http_get_scenarios error {}: {}".format(e.http_code, str(e)))
-        bottle.abort(e.http_code, str(e))
-    except Exception as e:
-        logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
-
-
-@bottle.route(url_base + '/<tenant_id>/scenarios/<scenario_id>', method='DELETE')
-def http_delete_scenario_id(tenant_id, scenario_id):
-    '''delete a scenario from database, can use both uuid or name'''
-    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
-    try:
-        #check valid tenant_id
-        if tenant_id != "any":
-            nfvo.check_tenant(mydb, tenant_id)
-        #obtain data
-        data = mydb.delete_scenario(scenario_id, tenant_id)
-        #print json.dumps(data, indent=4)
-        return format_out({"result":"scenario " + data + " deleted"})
-    except bottle.HTTPError:
-        raise
-    except (nfvo.NfvoException, db_base_Exception) as e:
-        logger.error("http_delete_scenario_id error {}: {}".format(e.http_code, str(e)))
-        bottle.abort(e.http_code, str(e))
-    except Exception as e:
-        logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
-
-
-@bottle.route(url_base + '/<tenant_id>/scenarios/<scenario_id>', method='PUT')
-def http_put_scenario_id(tenant_id, scenario_id):
-    '''edit an existing scenario id'''
-    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
-    http_content,_ = format_in( scenario_edit_schema )
-    #r = utils.remove_extra_items(http_content, scenario_edit_schema)
-    #if r is not None: print "http_put_scenario_id: Warning: remove extra items ", r
-    #print "http_put_scenario_id input: ",  http_content
-    try:
-        nfvo.edit_scenario(mydb, tenant_id, scenario_id, http_content)
-        #print json.dumps(data, indent=4)
-        #return format_out(data)
-        return http_get_scenario_id(tenant_id, scenario_id)
-    except bottle.HTTPError:
-        raise
-    except (nfvo.NfvoException, db_base_Exception) as e:
-        logger.error("http_put_scenario_id error {}: {}".format(e.http_code, str(e)))
-        bottle.abort(e.http_code, str(e))
-    except Exception as e:
-        logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
-
-@bottle.route(url_base + '/<tenant_id>/instances', method='POST')
-def http_post_instances(tenant_id):
-    '''create an instance-scenario'''
-    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
-    # parse input data
-    http_content, used_schema = format_in(instance_scenario_create_schema_v01)
-    r = utils.remove_extra_items(http_content, used_schema)
-    if r is not None:
-        logger.warning("http_post_instances: Warning: remove extra items %s", str(r))
-    try:
-        #check valid tenant_id
-        if tenant_id != "any":
-            nfvo.check_tenant(mydb, tenant_id)
-        data = nfvo.create_instance(mydb, tenant_id, http_content["instance"])
-        return format_out(data)
-    except bottle.HTTPError:
-        raise
-    except (nfvo.NfvoException, db_base_Exception) as e:
-        logger.error("http_post_instances error {}: {}".format(e.http_code, str(e)), exc_info=True)
-        bottle.abort(e.http_code, str(e))
-    except Exception as e:
-        logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
-
-#
-# INSTANCES
-#
-@bottle.route(url_base + '/<tenant_id>/instances', method='GET')
-def http_get_instances(tenant_id):
-    '''get instance list'''
-    try:
-        #check valid tenant_id
-        if tenant_id != "any":
-            nfvo.check_tenant(mydb, tenant_id)
-        #obtain data
-        s,w,l=filter_query_string(bottle.request.query, None, ('uuid', 'name', 'scenario_id', 'tenant_id', 'description', 'created_at'))
-        if tenant_id != "any":
-            w['tenant_id'] = tenant_id
-        instances = mydb.get_rows(SELECT=s, WHERE=w, LIMIT=l, FROM='instance_scenarios')
-        utils.convert_float_timestamp2str(instances)
-        utils.convert_str2boolean(instances, ('public',) )
-        data={'instances':instances}
-        return format_out(data)
-    except bottle.HTTPError:
-        raise
-    except (nfvo.NfvoException, db_base_Exception) as e:
-        logger.error("http_get_instances error {}: {}".format(e.http_code, str(e)))
-        bottle.abort(e.http_code, str(e))
-    except Exception as e:
-        logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
-
-
-@bottle.route(url_base + '/<tenant_id>/instances/<instance_id>', method='GET')
-def http_get_instance_id(tenant_id, instance_id):
-    '''get instances details, can use both uuid or name'''
-    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
-    try:
-
-        #check valid tenant_id
-        if tenant_id != "any":
-            nfvo.check_tenant(mydb, tenant_id)
-        if tenant_id == "any":
-            tenant_id = None
-
-        instance = nfvo.get_instance_id(mydb, tenant_id, instance_id)
-
-        # Workaround to SO, convert vnfs:vms:interfaces:ip_address from ";" separated list to report the first value
-        for vnf in instance.get("vnfs", ()):
-            for vm in vnf.get("vms", ()):
-                for iface in vm.get("interfaces", ()):
-                    if iface.get("ip_address"):
-                        index = iface["ip_address"].find(";")
-                        if index >= 0:
-                            iface["ip_address"] = iface["ip_address"][:index]
-        utils.convert_float_timestamp2str(instance)
-        # print json.dumps(instance, indent=4)
-        return format_out(instance)
-    except bottle.HTTPError:
-        raise
-    except (nfvo.NfvoException, db_base_Exception) as e:
-        logger.error("http_get_instance_id error {}: {}".format(e.http_code, str(e)))
-        bottle.abort(e.http_code, str(e))
-    except Exception as e:
-        logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
-
-
-@bottle.route(url_base + '/<tenant_id>/instances/<instance_id>', method='DELETE')
-def http_delete_instance_id(tenant_id, instance_id):
-    '''delete instance from VIM and from database, can use both uuid or name'''
-    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
-    try:
-        #check valid tenant_id
-        if tenant_id != "any":
-            nfvo.check_tenant(mydb, tenant_id)
-        if tenant_id == "any":
-            tenant_id = None
-        #obtain data
-        message = nfvo.delete_instance(mydb, tenant_id,instance_id)
-        return format_out({"result":message})
-    except bottle.HTTPError:
-        raise
-    except (nfvo.NfvoException, db_base_Exception) as e:
-        logger.error("http_delete_instance_id error {}: {}".format(e.http_code, str(e)))
-        bottle.abort(e.http_code, str(e))
-    except Exception as e:
-        logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
-
-
-@bottle.route(url_base + '/<tenant_id>/instances/<instance_id>/action', method='POST')
-def http_post_instance_scenario_action(tenant_id, instance_id):
-    """
-    take an action over a scenario instance
-    :param tenant_id: tenant where user belongs to
-    :param instance_id: instance indentity
-    :return:
-    """
-    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
-    # parse input data
-    http_content, _ = format_in(instance_scenario_action_schema)
-    r = utils.remove_extra_items(http_content, instance_scenario_action_schema)
-    if r:
-        logger.debug("Remove received extra items %s", str(r))
-    try:
-        #check valid tenant_id
-        if tenant_id != "any":
-            nfvo.check_tenant(mydb, tenant_id)
-
-        #print "http_post_instance_scenario_action input: ", http_content
-        #obtain data
-        instance = mydb.get_instance_scenario(instance_id, tenant_id)
-        instance_id = instance["uuid"]
-
-        data = nfvo.instance_action(mydb, tenant_id, instance_id, http_content)
-        return format_out(data)
-    except bottle.HTTPError:
-        raise
-    except (nfvo.NfvoException, db_base_Exception) as e:
-        logger.error("http_post_instance_scenario_action error {}: {}".format(e.http_code, str(e)))
-        bottle.abort(e.http_code, str(e))
-    except Exception as e:
-        logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
-
-
-@bottle.route(url_base + '/<tenant_id>/instances/<instance_id>/action', method='GET')
-@bottle.route(url_base + '/<tenant_id>/instances/<instance_id>/action/<action_id>', method='GET')
-def http_get_instance_scenario_action(tenant_id, instance_id, action_id=None):
-    """
-    List the actions done over an instance, or the action details
-    :param tenant_id: tenant where user belongs to. Can be "any" to ignore
-    :param instance_id: instance id, can be "any" to get actions of all instances
-    :return:
-    """
-    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
-    try:
-        # check valid tenant_id
-        if tenant_id != "any":
-            nfvo.check_tenant(mydb, tenant_id)
-        data = nfvo.instance_action_get(mydb, tenant_id, instance_id, action_id)
-        return format_out(data)
-    except bottle.HTTPError:
-        raise
-    except (nfvo.NfvoException, db_base_Exception) as e:
-        logger.error("http_get_instance_scenario_action error {}: {}".format(e.http_code, str(e)))
-        bottle.abort(e.http_code, str(e))
-    except Exception as e:
-        logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
-
-
-@bottle.error(400)
-@bottle.error(401)
-@bottle.error(404)
-@bottle.error(403)
-@bottle.error(405)
-@bottle.error(406)
-@bottle.error(409)
-@bottle.error(503)
-@bottle.error(500)
-def error400(error):
-    e={"error":{"code":error.status_code, "type":error.status, "description":error.body}}
-    bottle.response.headers['Access-Control-Allow-Origin'] = '*'
-    return format_out(e)
-
diff --git a/osm_ro/nfvo.py b/osm_ro/nfvo.py
deleted file mode 100644 (file)
index 4970500..0000000
+++ /dev/null
@@ -1,5774 +0,0 @@
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-'''
-NFVO engine, implementing all the methods for the creation, deletion and management of vnfs, scenarios and instances
-'''
-__author__="Alfonso Tierno, Gerardo Garcia, Pablo Montes"
-__date__ ="$16-sep-2014 22:05:01$"
-
-# import imp
-import json
-import yaml
-import utils
-from utils import deprecated
-import vim_thread
-import console_proxy_thread as cli
-import vimconn
-import logging
-import collections
-import math
-from uuid import uuid4
-from db_base import db_base_Exception
-
-import nfvo_db
-from threading import Lock
-import time as t
-from lib_osm_openvim import ovim as ovim_module
-from lib_osm_openvim.ovim import ovimException
-from Crypto.PublicKey import RSA
-
-import osm_im.vnfd as vnfd_catalog
-import osm_im.nsd as nsd_catalog
-from pyangbind.lib.serialise import pybindJSONDecoder
-from copy import deepcopy
-
-
-# WIM
-import wim.wimconn as wimconn
-import wim.wim_thread as wim_thread
-from .http_tools import errors as httperrors
-from .wim.engine import WimEngine
-from .wim.persistence import WimPersistence
-from copy import deepcopy
-from pprint import pformat
-#
-
-global global_config
-global vimconn_imported
-# WIM
-global wim_engine
-wim_engine  = None
-global wimconn_imported
-#
-global logger
-global default_volume_size
-default_volume_size = '5' #size in GB
-global ovim
-ovim = None
-global_config = None
-
-vimconn_imported = {}   # dictionary with VIM type as key, loaded module as value
-vim_threads = {"running":{}, "deleting": {}, "names": []}      # threads running for attached-VIMs
-vim_persistent_info = {}
-# WIM
-wimconn_imported = {}   # dictionary with WIM type as key, loaded module as value
-wim_threads = {"running":{}, "deleting": {}, "names": []}      # threads running for attached-WIMs
-wim_persistent_info = {}
-#
-
-logger = logging.getLogger('openmano.nfvo')
-task_lock = Lock()
-last_task_id = 0.0
-db = None
-db_lock = Lock()
-
-
-class NfvoException(httperrors.HttpMappedError):
-    """Common Class for NFVO errors"""
-
-
-def get_task_id():
-    global last_task_id
-    task_id = t.time()
-    if task_id <= last_task_id:
-        task_id = last_task_id + 0.000001
-    last_task_id = task_id
-    return "ACTION-{:.6f}".format(task_id)
-    # return (t.strftime("%Y%m%dT%H%M%S.{}%Z", t.localtime(task_id))).format(int((task_id % 1)*1e6))
-
-
-def new_task(name, params, depends=None):
-    """Deprected!!!"""
-    task_id = get_task_id()
-    task = {"status": "enqueued", "id": task_id, "name": name, "params": params}
-    if depends:
-        task["depends"] = depends
-    return task
-
-
-def is_task_id(id):
-    return True if id[:5] == "TASK-" else False
-
-
-def get_non_used_vim_name(datacenter_name, datacenter_id, tenant_name, tenant_id):
-    name = datacenter_name[:16]
-    if name not in vim_threads["names"]:
-        vim_threads["names"].append(name)
-        return name
-    if tenant_name:
-        name = datacenter_name[:16] + "." + tenant_name[:16]
-        if name not in vim_threads["names"]:
-            vim_threads["names"].append(name)
-            return name
-    name = datacenter_id
-    vim_threads["names"].append(name)
-    return name
-
-# -- Move
-def get_non_used_wim_name(wim_name, wim_id, tenant_name, tenant_id):
-    name = wim_name[:16]
-    if name not in wim_threads["names"]:
-        wim_threads["names"].append(name)
-        return name
-    name = wim_name[:16] + "." + tenant_name[:16]
-    if name not in wim_threads["names"]:
-        wim_threads["names"].append(name)
-        return name
-    name = wim_id + "-" + tenant_id
-    wim_threads["names"].append(name)
-    return name
-
-
-def start_service(mydb, persistence=None, wim=None):
-    global db, global_config
-    db = nfvo_db.nfvo_db(lock=db_lock)
-    mydb.lock = db_lock
-    db.connect(global_config['db_host'], global_config['db_user'], global_config['db_passwd'], global_config['db_name'])
-    global ovim
-
-    persistence = persistence or  WimPersistence(db)
-
-    # Initialize openvim for SDN control
-    # TODO: Avoid static configuration by adding new parameters to openmanod.cfg
-    # TODO: review ovim.py to delete not needed configuration
-    ovim_configuration = {
-        'logger_name': 'openmano.ovim',
-        'network_vlan_range_start': 1000,
-        'network_vlan_range_end': 4096,
-        'db_name': global_config["db_ovim_name"],
-        'db_host': global_config["db_ovim_host"],
-        'db_user': global_config["db_ovim_user"],
-        'db_passwd': global_config["db_ovim_passwd"],
-        'bridge_ifaces': {},
-        'mode': 'normal',
-        'network_type': 'bridge',
-        #TODO: log_level_of should not be needed. To be modified in ovim
-        'log_level_of': 'DEBUG'
-    }
-    try:
-        # starts ovim library
-        ovim = ovim_module.ovim(ovim_configuration)
-
-        global wim_engine
-        wim_engine = wim or WimEngine(persistence)
-        wim_engine.ovim = ovim
-
-        ovim.start_service()
-
-        #delete old unneeded vim_wim_actions
-        clean_db(mydb)
-
-        # starts vim_threads
-        from_= 'tenants_datacenters as td join datacenters as d on td.datacenter_id=d.uuid join '\
-                'datacenter_tenants as dt on td.datacenter_tenant_id=dt.uuid'
-        select_ = ('type', 'd.config as config', 'd.uuid as datacenter_id', 'vim_url', 'vim_url_admin',
-                   'd.name as datacenter_name', 'dt.uuid as datacenter_tenant_id',
-                   'dt.vim_tenant_name as vim_tenant_name', 'dt.vim_tenant_id as vim_tenant_id',
-                   'user', 'passwd', 'dt.config as dt_config', 'nfvo_tenant_id')
-        vims = mydb.get_rows(FROM=from_, SELECT=select_)
-        for vim in vims:
-            extra={'datacenter_tenant_id': vim.get('datacenter_tenant_id'),
-                   'datacenter_id': vim.get('datacenter_id')}
-            if vim["config"]:
-                extra.update(yaml.load(vim["config"]))
-            if vim.get('dt_config'):
-                extra.update(yaml.load(vim["dt_config"]))
-            if vim["type"] not in vimconn_imported:
-                module_info=None
-                try:
-                    module = "vimconn_" + vim["type"]
-                    pkg = __import__("osm_ro." + module)
-                    vim_conn = getattr(pkg, module)
-                    # module_info = imp.find_module(module, [__file__[:__file__.rfind("/")]])
-                    # vim_conn = imp.load_module(vim["type"], *module_info)
-                    vimconn_imported[vim["type"]] = vim_conn
-                except (IOError, ImportError) as e:
-                    # if module_info and module_info[0]:
-                    #    file.close(module_info[0])
-                    raise NfvoException("Unknown vim type '{}'. Cannot open file '{}.py'; {}: {}".format(
-                        vim["type"], module, type(e).__name__, str(e)), httperrors.Bad_Request)
-
-            thread_id = vim['datacenter_tenant_id']
-            vim_persistent_info[thread_id] = {}
-            try:
-                #if not tenant:
-                #    return -httperrors.Bad_Request, "You must provide a valid tenant name or uuid for VIM  %s" % ( vim["type"])
-                myvim = vimconn_imported[ vim["type"] ].vimconnector(
-                    uuid=vim['datacenter_id'], name=vim['datacenter_name'],
-                    tenant_id=vim['vim_tenant_id'], tenant_name=vim['vim_tenant_name'],
-                    url=vim['vim_url'], url_admin=vim['vim_url_admin'],
-                    user=vim['user'], passwd=vim['passwd'],
-                    config=extra, persistent_info=vim_persistent_info[thread_id]
-                )
-            except vimconn.vimconnException as e:
-                myvim = e
-                logger.error("Cannot launch thread for VIM {} '{}': {}".format(vim['datacenter_name'],
-                                                                               vim['datacenter_id'], e))
-            except Exception as e:
-                raise NfvoException("Error at VIM  {}; {}: {}".format(vim["type"], type(e).__name__, e),
-                                    httperrors.Internal_Server_Error)
-            thread_name = get_non_used_vim_name(vim['datacenter_name'], vim['datacenter_id'], vim['vim_tenant_name'],
-                                                vim['vim_tenant_id'])
-            new_thread = vim_thread.vim_thread(task_lock, thread_name, vim['datacenter_name'],
-                                               vim['datacenter_tenant_id'], db=db, db_lock=db_lock, ovim=ovim)
-            new_thread.start()
-            vim_threads["running"][thread_id] = new_thread
-
-        wim_engine.start_threads()
-    except db_base_Exception as e:
-        raise NfvoException(str(e) + " at nfvo.get_vim", e.http_code)
-    except ovim_module.ovimException as e:
-        message = str(e)
-        if message[:22] == "DATABASE wrong version":
-            message = "DATABASE wrong version of lib_osm_openvim {msg} -d{dbname} -u{dbuser} -p{dbpass} {ver}' "\
-                      "at host {dbhost}".format(
-                            msg=message[22:-3], dbname=global_config["db_ovim_name"],
-                            dbuser=global_config["db_ovim_user"], dbpass=global_config["db_ovim_passwd"],
-                            ver=message[-3:-1], dbhost=global_config["db_ovim_host"])
-        raise NfvoException(message, httperrors.Bad_Request)
-
-
-def stop_service():
-    global ovim, global_config
-    if ovim:
-        ovim.stop_service()
-    for thread_id, thread in vim_threads["running"].items():
-        thread.insert_task("exit")
-        vim_threads["deleting"][thread_id] = thread
-    vim_threads["running"] = {}
-
-    if wim_engine:
-        wim_engine.stop_threads()
-
-    if global_config and global_config.get("console_thread"):
-        for thread in global_config["console_thread"]:
-            thread.terminate = True
-
-def get_version():
-    return  ("openmanod version {} {}\n(c) Copyright Telefonica".format(global_config["version"],
-                                                                        global_config["version_date"] ))
-
-def clean_db(mydb):
-    """
-    Clean unused or old entries at database to avoid unlimited growing
-    :param mydb: database connector
-    :return: None
-    """
-    # get and delete unused vim_wim_actions: all elements deleted, one week before, instance not present
-    now = t.time()-3600*24*7
-    instance_action_id = None
-    nb_deleted = 0
-    while True:
-        actions_to_delete = mydb.get_rows(
-            SELECT=("item", "item_id", "instance_action_id"),
-            FROM="vim_wim_actions as va join instance_actions as ia on va.instance_action_id=ia.uuid "
-                    "left join instance_scenarios as i on ia.instance_id=i.uuid",
-            WHERE={"va.action": "DELETE", "va.modified_at<": now, "i.uuid": None,
-                   "va.status": ("DONE", "SUPERSEDED")},
-            LIMIT=100
-        )
-        for to_delete in actions_to_delete:
-            mydb.delete_row(FROM="vim_wim_actions", WHERE=to_delete)
-            if instance_action_id != to_delete["instance_action_id"]:
-                instance_action_id = to_delete["instance_action_id"]
-                mydb.delete_row(FROM="instance_actions", WHERE={"uuid": instance_action_id})
-        nb_deleted += len(actions_to_delete)
-        if len(actions_to_delete) < 100:
-            break
-    # clean locks
-    mydb.update_rows("vim_wim_actions", UPDATE={"worker": None}, WHERE={"worker<>": None})
-
-    if nb_deleted:
-        logger.debug("Removed {} unused vim_wim_actions".format(nb_deleted))
-
-
-def get_flavorlist(mydb, vnf_id, nfvo_tenant=None):
-    '''Obtain flavorList
-    return result, content:
-        <0, error_text upon error
-        nb_records, flavor_list on success
-    '''
-    WHERE_dict={}
-    WHERE_dict['vnf_id'] = vnf_id
-    if nfvo_tenant is not None:
-        WHERE_dict['nfvo_tenant_id'] = nfvo_tenant
-
-    #result, content = mydb.get_table(FROM='vms join vnfs on vms.vnf_id = vnfs.uuid',SELECT=('uuid'),WHERE=WHERE_dict )
-    #result, content = mydb.get_table(FROM='vms',SELECT=('vim_flavor_id',),WHERE=WHERE_dict )
-    flavors = mydb.get_rows(FROM='vms join flavors on vms.flavor_id=flavors.uuid',SELECT=('flavor_id',),WHERE=WHERE_dict )
-    #print "get_flavor_list result:", result
-    #print "get_flavor_list content:", content
-    flavorList=[]
-    for flavor in flavors:
-        flavorList.append(flavor['flavor_id'])
-    return flavorList
-
-
-def get_imagelist(mydb, vnf_id, nfvo_tenant=None):
-    """
-    Get used images of all vms belonging to this VNFD
-    :param mydb: database conector
-    :param vnf_id: vnfd uuid
-    :param nfvo_tenant: tenant, not used
-    :return: The list of image uuid used
-    """
-    image_list = []
-    vms = mydb.get_rows(SELECT=('image_id','image_list'), FROM='vms', WHERE={'vnf_id': vnf_id})
-    for vm in vms:
-        if vm["image_id"] and vm["image_id"] not in image_list:
-            image_list.append(vm["image_id"])
-        if vm["image_list"]:
-            vm_image_list = yaml.load(vm["image_list"])
-            for image_dict in vm_image_list:
-                if image_dict["image_id"] not in image_list:
-                    image_list.append(image_dict["image_id"])
-    return image_list
-
-
def get_vim(mydb, nfvo_tenant=None, datacenter_id=None, datacenter_name=None, datacenter_tenant_id=None,
            vim_tenant=None, vim_tenant_name=None, vim_user=None, vim_passwd=None, ignore_errors=False):
    """Obtain a dictionary of VIM (datacenter) connector instances filtered by the given parameters.

    :param mydb: database connector
    :param nfvo_tenant: filter by NFVO tenant uuid
    :param datacenter_id: filter by datacenter uuid
    :param datacenter_name: filter by datacenter name
    :param datacenter_tenant_id: filter by datacenter-tenant association uuid
    :param vim_tenant: filter by tenant id at the VIM
    :param vim_tenant_name: filter by tenant name at the VIM
    :param vim_user: user to pass to the connector when the DB row does not provide one
    :param vim_passwd: password to pass to the connector when the DB row does not provide one
    :param ignore_errors: when True, log and skip VIMs that fail to load or instantiate instead of raising
    :return: dictionary {datacenter_id: vim_connector_instance, ...}. Each connector is built with
        'nfvo_tenant_id', 'datacenter_id', 'vim_tenant_id', 'vim_url', 'vim_url_admin',
        'datacenter_name', 'type', 'user', 'passwd'
    :raises NfvoException: upon database error, unknown VIM type or connector instantiation error
    """
    WHERE_dict = {}
    if nfvo_tenant is not None:
        WHERE_dict['nfvo_tenant_id'] = nfvo_tenant
    if datacenter_id is not None:
        WHERE_dict['d.uuid'] = datacenter_id
    if datacenter_tenant_id is not None:
        WHERE_dict['datacenter_tenant_id'] = datacenter_tenant_id
    if datacenter_name is not None:
        WHERE_dict['d.name'] = datacenter_name
    if vim_tenant is not None:
        WHERE_dict['dt.vim_tenant_id'] = vim_tenant
    if vim_tenant_name is not None:
        WHERE_dict['vim_tenant_name'] = vim_tenant_name
    # tenant-scoped queries need the datacenter-tenant association tables
    if nfvo_tenant or vim_tenant or vim_tenant_name or datacenter_tenant_id:
        from_ = 'tenants_datacenters as td join datacenters as d on td.datacenter_id=d.uuid join datacenter_tenants as dt on td.datacenter_tenant_id=dt.uuid'
        select_ = ('type', 'd.config as config', 'd.uuid as datacenter_id', 'vim_url', 'vim_url_admin',
                   'd.name as datacenter_name', 'dt.uuid as datacenter_tenant_id',
                   'dt.vim_tenant_name as vim_tenant_name', 'dt.vim_tenant_id as vim_tenant_id',
                   'user', 'passwd', 'dt.config as dt_config')
    else:
        from_ = 'datacenters as d'
        select_ = ('type', 'config', 'd.uuid as datacenter_id', 'vim_url', 'vim_url_admin', 'd.name as datacenter_name')
    try:
        vims = mydb.get_rows(FROM=from_, SELECT=select_, WHERE=WHERE_dict)
        vim_dict = {}
        for vim in vims:
            extra = {'datacenter_tenant_id': vim.get('datacenter_tenant_id'),
                     'datacenter_id': vim.get('datacenter_id'),
                     '_vim_type_internal': vim.get('type')}
            # merge datacenter config and datacenter-tenant config (the latter takes precedence).
            # BUGFIX: yaml.load without an explicit Loader is unsafe and deprecated in PyYAML >= 5.1;
            # these configs are plain mappings, so safe_load is the correct choice
            if vim["config"]:
                extra.update(yaml.safe_load(vim["config"]))
            if vim.get('dt_config'):
                extra.update(yaml.safe_load(vim["dt_config"]))
            # lazily import the connector module for this VIM type and cache it
            if vim["type"] not in vimconn_imported:
                module = None  # initialized so the error message below cannot hit an unbound name
                try:
                    module = "vimconn_" + vim["type"]
                    pkg = __import__("osm_ro." + module)
                    vim_conn = getattr(pkg, module)
                    vimconn_imported[vim["type"]] = vim_conn
                except (IOError, ImportError) as e:
                    if ignore_errors:
                        logger.error("Unknown vim type '{}'. Can not open file '{}.py'; {}: {}".format(
                            vim["type"], module, type(e).__name__, str(e)))
                        continue
                    raise NfvoException("Unknown vim type '{}'. Can not open file '{}.py'; {}: {}".format(
                        vim["type"], module, type(e).__name__, str(e)), httperrors.Bad_Request)

            try:
                # persistent_info is shared across calls per datacenter-tenant (thread id),
                # so connectors can keep e.g. session tokens between invocations
                if 'datacenter_tenant_id' in vim:
                    thread_id = vim["datacenter_tenant_id"]
                    if thread_id not in vim_persistent_info:
                        vim_persistent_info[thread_id] = {}
                    persistent_info = vim_persistent_info[thread_id]
                else:
                    persistent_info = {}
                vim_dict[vim['datacenter_id']] = vimconn_imported[vim["type"]].vimconnector(
                    uuid=vim['datacenter_id'], name=vim['datacenter_name'],
                    tenant_id=vim.get('vim_tenant_id', vim_tenant),
                    tenant_name=vim.get('vim_tenant_name', vim_tenant_name),
                    url=vim['vim_url'], url_admin=vim['vim_url_admin'],
                    user=vim.get('user', vim_user), passwd=vim.get('passwd', vim_passwd),
                    config=extra, persistent_info=persistent_info)
            except Exception as e:
                if ignore_errors:
                    logger.error("Error at VIM  {}; {}: {}".format(vim["type"], type(e).__name__, str(e)))
                    continue
                http_code = httperrors.Internal_Server_Error
                if isinstance(e, vimconn.vimconnException):
                    http_code = e.http_code
                raise NfvoException("Error at VIM  {}; {}: {}".format(vim["type"], type(e).__name__, str(e)), http_code)
        return vim_dict
    except db_base_Exception as e:
        raise NfvoException(str(e) + " at nfvo.get_vim", e.http_code)
-
-
def rollback(mydb, vims, rollback_list):
    """Undo the VIM/DB creations recorded in rollback_list, in reverse creation order.

    :param mydb: database connector
    :param vims: dictionary of vim connectors, indexed by vim_id
    :param rollback_list: list of dicts with keys "where" ("vim"|"mano"), "what"
        ("image"|"flavor"|"network"|"vm"), "uuid" and, for vim items, "vim_id"
    :return: tuple (True, message) if everything was deleted, (False, message) listing
        the items that could not be deleted
    """
    undeleted_items = []
    # delete in reverse creation order so dependent objects are removed first
    for item in reversed(rollback_list):
        if item["where"] == "vim":
            if item["vim_id"] not in vims:
                continue
            if is_task_id(item["uuid"]):
                # still a pending task id, never materialized at the VIM: nothing to delete
                continue
            vim = vims[item["vim_id"]]
            try:
                if item["what"] == "image":
                    vim.delete_image(item["uuid"])
                    mydb.delete_row(FROM="datacenters_images",
                                    WHERE={"datacenter_vim_id": vim["id"], "vim_id": item["uuid"]})
                elif item["what"] == "flavor":
                    vim.delete_flavor(item["uuid"])
                    mydb.delete_row(FROM="datacenters_flavors",
                                    WHERE={"datacenter_vim_id": vim["id"], "vim_id": item["uuid"]})
                elif item["what"] == "network":
                    vim.delete_network(item["uuid"])
                elif item["what"] == "vm":
                    vim.delete_vminstance(item["uuid"])
            except vimconn.vimconnException as e:
                logger.error("Error in rollback. Not possible to delete VIM %s '%s'. Message: %s",
                             item['what'], item["uuid"], str(e))
                undeleted_items.append("{} {} from VIM {}".format(item['what'], item["uuid"], vim["name"]))
            except db_base_Exception as e:
                logger.error("Error in rollback. Not possible to delete %s '%s' from DB.datacenters Message: %s",
                             item['what'], item["uuid"], str(e))
        else:  # where == "mano": delete only from the NFVO database
            try:
                if item["what"] == "image":
                    mydb.delete_row(FROM="images", WHERE={"uuid": item["uuid"]})
                elif item["what"] == "flavor":
                    mydb.delete_row(FROM="flavors", WHERE={"uuid": item["uuid"]})
            except db_base_Exception as e:
                logger.error("Error in rollback. Not possible to delete %s '%s' from DB. Message: %s",
                             item['what'], item["uuid"], str(e))
                undeleted_items.append("{} '{}'".format(item['what'], item["uuid"]))
    if len(undeleted_items) == 0:
        return True, " Rollback successful."
    else:
        return False, " Rollback fails to delete: " + str(undeleted_items)
-
-
def check_vnf_descriptor(vnf_descriptor, vnf_descriptor_version=1):
    """Validate an openmano VNF descriptor for internal consistency.

    Checks that interface names are unique inside each VNFC, that external and
    internal connections reference existing VNFCs and interfaces, and fills in
    the default "type" (v1) or "implementation" (v2) of each internal
    connection according to the interface kind (underlay/overlay).

    :param vnf_descriptor: dictionary with the descriptor under key "vnf"
    :param vnf_descriptor_version: descriptor schema version, 1 or 2
    :return: None. The descriptor may be modified in place (defaults filled in)
    :raises NfvoException: (Bad_Request) upon any inconsistency
    """
    # create a dictionary with vnfc-name: {interface-name: "underlay"|"overlay"} pairs
    vnfc_interfaces = {}
    for vnfc in vnf_descriptor["vnf"]["VNFC"]:
        name_dict = {}
        # dataplane (underlay) interfaces, declared inside numas
        for numa in vnfc.get("numas", ()):
            for interface in numa.get("interfaces", ()):
                if interface["name"] in name_dict:
                    raise NfvoException(
                        "Error at vnf:VNFC[name:'{}']:numas:interfaces:name, interface name '{}' already used in this VNFC".format(
                            vnfc["name"], interface["name"]),
                        httperrors.Bad_Request)
                name_dict[interface["name"]] = "underlay"
        # bridge (overlay) interfaces
        for interface in vnfc.get("bridge-ifaces", ()):
            if interface["name"] in name_dict:
                raise NfvoException(
                    "Error at vnf:VNFC[name:'{}']:bridge-ifaces:name, interface name '{}' already used in this VNFC".format(
                        vnfc["name"], interface["name"]),
                    httperrors.Bad_Request)
            name_dict[interface["name"]] = "overlay"
        vnfc_interfaces[vnfc["name"]] = name_dict

    # check if the info in external_connections matches with the one in the vnfcs
    name_list = []
    for external_connection in vnf_descriptor["vnf"].get("external-connections", ()):
        if external_connection["name"] in name_list:
            raise NfvoException(
                "Error at vnf:external-connections:name, value '{}' already used as an external-connection".format(
                    external_connection["name"]),
                httperrors.Bad_Request)
        name_list.append(external_connection["name"])
        if external_connection["VNFC"] not in vnfc_interfaces:
            raise NfvoException(
                "Error at vnf:external-connections[name:'{}']:VNFC, value '{}' does not match any VNFC".format(
                    external_connection["name"], external_connection["VNFC"]),
                httperrors.Bad_Request)

        if external_connection["local_iface_name"] not in vnfc_interfaces[external_connection["VNFC"]]:
            raise NfvoException(
                "Error at vnf:external-connections[name:'{}']:local_iface_name, value '{}' does not match any interface of this VNFC".format(
                    external_connection["name"],
                    external_connection["local_iface_name"]),
                httperrors.Bad_Request)

    # check if the info in internal_connections matches with the one in the vnfcs
    name_list = []
    for internal_connection in vnf_descriptor["vnf"].get("internal-connections", ()):
        if internal_connection["name"] in name_list:
            # BUGFIX: the message used a '%s' placeholder together with str.format,
            # so the connection name was never interpolated
            raise NfvoException(
                "Error at vnf:internal-connections:name, value '{}' already used as an internal-connection".format(
                    internal_connection["name"]),
                httperrors.Bad_Request)
        name_list.append(internal_connection["name"])
        # internal-connections of type "ptp"/"e-line" admit only 2 elements
        if len(internal_connection["elements"]) > 2 and (internal_connection.get("type") == "ptp" or
                                                         internal_connection.get("type") == "e-line"):
            raise NfvoException(
                "Error at 'vnf:internal-connections[name:'{}']:elements', size must be 2 for a '{}' type. Consider change it to '{}' type".format(
                    internal_connection["name"],
                    'ptp' if vnf_descriptor_version == 1 else 'e-line',
                    'data' if vnf_descriptor_version == 1 else "e-lan"),
                httperrors.Bad_Request)
        for port in internal_connection["elements"]:
            vnf = port["VNFC"]
            iface = port["local_iface_name"]
            if vnf not in vnfc_interfaces:
                raise NfvoException(
                    "Error at vnf:internal-connections[name:'{}']:elements[]:VNFC, value '{}' does not match any VNFC".format(
                        internal_connection["name"], vnf),
                    httperrors.Bad_Request)
            if iface not in vnfc_interfaces[vnf]:
                # BUGFIX: removed an unreachable "return -httperrors.Bad_Request," that
                # followed this raise in the original code
                raise NfvoException(
                    "Error at vnf:internal-connections[name:'{}']:elements[]:local_iface_name, value '{}' does not match any interface of this VNFC".format(
                        internal_connection["name"], iface),
                    httperrors.Bad_Request)
            # fill the default connection type/implementation from the interface kind
            if vnf_descriptor_version == 1 and "type" not in internal_connection:
                internal_connection["type"] = "bridge" if vnfc_interfaces[vnf][iface] == "overlay" else "data"
            if vnf_descriptor_version == 2 and "implementation" not in internal_connection:
                internal_connection["implementation"] = "overlay" if vnfc_interfaces[vnf][iface] == "overlay" \
                    else "underlay"
            # an underlay (data/ptp) network cannot attach overlay interfaces, and vice versa
            if (internal_connection.get("type") == "data" or internal_connection.get("type") == "ptp" or
                    internal_connection.get("implementation") == "underlay") and vnfc_interfaces[vnf][iface] == "overlay":
                raise NfvoException(
                    "Error at vnf:internal-connections[name:'{}']:elements[]:{}, interface of type {} connected to an {} network".format(
                        internal_connection["name"],
                        iface, 'bridge' if vnf_descriptor_version == 1 else 'overlay',
                        'data' if vnf_descriptor_version == 1 else 'underlay'),
                    httperrors.Bad_Request)
            if (internal_connection.get("type") == "bridge" or internal_connection.get("implementation") == "overlay") and \
                    vnfc_interfaces[vnf][iface] == "underlay":
                raise NfvoException(
                    "Error at vnf:internal-connections[name:'{}']:elements[]:{}, interface of type {} connected to an {} network".format(
                        internal_connection["name"], iface,
                        'data' if vnf_descriptor_version == 1 else 'underlay',
                        'bridge' if vnf_descriptor_version == 1 else 'overlay'),
                    httperrors.Bad_Request)
-
-
def create_or_use_image(mydb, vims, image_dict, rollback_list, only_create_at_vim=False, return_on_error=None):
    """Get-or-create an image at the NFVO database and at every VIM.

    :param mydb: database connector
    :param vims: dictionary of vim connectors indexed by vim_id
    :param image_dict: image description; identified by 'location' or by
        'universal_name'/'checksum'; also uses 'name', 'description', 'metadata'
        (and 'uuid' when only_create_at_vim is True)
    :param rollback_list: list where created items are appended so they can be undone on failure
    :param only_create_at_vim: when True, skip the NFVO DB phase and use image_dict['uuid'] directly
    :param return_on_error: when True, raise on VIM errors instead of logging and skipping that VIM;
        defaults to True when only_create_at_vim is set
    :return: the image uuid at the (last) VIM when only_create_at_vim, else the NFVO DB image uuid
    """
    # look if the image exists at the NFVO DB
    if only_create_at_vim:
        image_mano_id = image_dict['uuid']
        if return_on_error is None:
            return_on_error = True
    else:
        # lookup by location, or by universal name + checksum when no location is given
        if image_dict['location']:
            images = mydb.get_rows(FROM="images", WHERE={'location': image_dict['location'],
                                                         'metadata': image_dict['metadata']})
        else:
            images = mydb.get_rows(FROM="images", WHERE={'universal_name': image_dict['universal_name'],
                                                         'checksum': image_dict['checksum']})
        if len(images) >= 1:
            image_mano_id = images[0]['uuid']
        else:
            # create the image at the NFVO DB and record it for rollback
            temp_image_dict = {'name': image_dict['name'], 'description': image_dict.get('description', None),
                               'location': image_dict['location'], 'metadata': image_dict.get('metadata', None),
                               'universal_name': image_dict['universal_name'], 'checksum': image_dict['checksum']}
            image_mano_id = mydb.new_row('images', temp_image_dict, add_uuid=True)
            rollback_list.append({"where": "mano", "what": "image", "uuid": image_mano_id})
    # create/look-up the image at every VIM
    # BUGFIX: initialized so an empty 'vims' with only_create_at_vim returns None instead of NameError
    image_vim_id = None
    # BUGFIX: was vims.iteritems(), which does not exist in python3
    for vim_id, vim in vims.items():
        datacenter_vim_id = vim["config"]["datacenter_tenant_id"]
        image_created = "false"  # stored as a string: the DB column holds text, not a boolean
        # look at database
        image_db = mydb.get_rows(FROM="datacenters_images",
                                 WHERE={'datacenter_vim_id': datacenter_vim_id, 'image_id': image_mano_id})
        # look at VIM if this image exists there already
        try:
            if image_dict['location'] is not None:
                image_vim_id = vim.get_image_id_from_path(image_dict['location'])
            else:
                filter_dict = {'name': image_dict['universal_name']}
                if image_dict.get('checksum') is not None:
                    filter_dict['checksum'] = image_dict['checksum']
                vim_images = vim.get_image_list(filter_dict)
                if len(vim_images) > 1:
                    raise vimconn.vimconnException("More than one candidate VIM image found for filter: {}".format(str(filter_dict)), httperrors.Conflict)
                elif len(vim_images) == 0:
                    raise vimconn.vimconnNotFoundException("Image not found at VIM with filter: '{}'".format(str(filter_dict)))
                else:
                    image_vim_id = vim_images[0]['id']

        except vimconn.vimconnNotFoundException as e:
            # not present at the VIM: upload it only if a location is available
            try:
                if image_dict['location']:
                    image_vim_id = vim.new_image(image_dict)
                    rollback_list.append({"where": "vim", "vim_id": vim_id, "what": "image", "uuid": image_vim_id})
                    image_created = "true"
                else:
                    # the image has a name (and optionally checksum) but was not found and cannot be uploaded
                    raise vimconn.vimconnException(str(e))
            except vimconn.vimconnException as e:
                if return_on_error:
                    logger.error("Error creating image at VIM '%s': %s", vim["name"], str(e))
                    raise
                image_vim_id = None
                logger.warn("Error creating image at VIM '%s': %s", vim["name"], str(e))
                continue
        except vimconn.vimconnException as e:
            if return_on_error:
                logger.error("Error contacting VIM to know if the image exists at VIM: %s", str(e))
                raise
            logger.warn("Error contacting VIM to know if the image exists at VIM: %s", str(e))
            image_vim_id = None
            continue
        # if we reach here, the image has been created or existed: sync the DB association
        if len(image_db) == 0:
            # add new vim_id at datacenters_images
            mydb.new_row('datacenters_images', {'datacenter_vim_id': datacenter_vim_id,
                                                'image_id': image_mano_id,
                                                'vim_id': image_vim_id,
                                                'created': image_created})
        elif image_db[0]["vim_id"] != image_vim_id:
            # modify existing vim_id at datacenters_images
            mydb.update_rows('datacenters_images', UPDATE={'vim_id': image_vim_id},
                             WHERE={'datacenter_vim_id': vim_id, 'image_id': image_mano_id})

    return image_vim_id if only_create_at_vim else image_mano_id
-
-
def create_or_use_flavor(mydb, vims, flavor_dict, rollback_list, only_create_at_vim=False, return_on_error = None):
    """Get-or-create a flavor at the NFVO database and at every VIM.

    NOTE: flavor_dict is modified in place ('extended' may be deleted when None,
    'uuid' is removed, and device entries get an 'imageRef' key filled in).

    :param mydb: database connector
    :param vims: dictionary of vim connectors indexed by vim_id
    :param flavor_dict: flavor description ('ram', 'vcpus', 'disk', optional 'extended'
        with additional disks/devices; 'uuid' when only_create_at_vim)
    :param rollback_list: list where created items are appended so they can be undone on failure
    :param only_create_at_vim: when True, skip the NFVO DB phase and use flavor_dict['uuid'] directly
    :param return_on_error: when True, raise on VIM errors instead of logging and skipping that VIM;
        defaults to True when only_create_at_vim is set
    :return: the flavor id at the (last) VIM when only_create_at_vim, else the NFVO DB flavor uuid
    """
    temp_flavor_dict= {'disk':flavor_dict.get('disk',0),
            'ram':flavor_dict.get('ram'),
            'vcpus':flavor_dict.get('vcpus'),
        }
    if 'extended' in flavor_dict and flavor_dict['extended']==None:
        del flavor_dict['extended']
    if 'extended' in flavor_dict:
        temp_flavor_dict['extended']=yaml.safe_dump(flavor_dict['extended'],default_flow_style=True,width=256)

    #look if flavor exist
    if only_create_at_vim:
        flavor_mano_id = flavor_dict['uuid']
        if return_on_error == None:
            return_on_error = True
    else:
        flavors = mydb.get_rows(FROM="flavors", WHERE=temp_flavor_dict)
        if len(flavors)>=1:
            flavor_mano_id = flavors[0]['uuid']
        else:
            #create flavor
            #create one by one the images of aditional disks
            dev_image_list=[] #list of images
            if 'extended' in flavor_dict and flavor_dict['extended']!=None:
                dev_nb=0
                for device in flavor_dict['extended'].get('devices',[]):
                    # only devices backed by an image need an image created/looked up
                    if "image" not in device and "image name" not in device:
                        continue
                    image_dict={}
                    image_dict['name']=device.get('image name',flavor_dict['name']+str(dev_nb)+"-img")
                    image_dict['universal_name']=device.get('image name')
                    image_dict['description']=flavor_dict['name']+str(dev_nb)+"-img"
                    image_dict['location']=device.get('image')
                    #image_dict['new_location']=vnfc.get('image location')
                    image_dict['checksum']=device.get('image checksum')
                    image_metadata_dict = device.get('image metadata', None)
                    image_metadata_str = None
                    if image_metadata_dict != None:
                        image_metadata_str = yaml.safe_dump(image_metadata_dict,default_flow_style=True,width=256)
                    image_dict['metadata']=image_metadata_str
                    image_id = create_or_use_image(mydb, vims, image_dict, rollback_list)
                    #print "Additional disk image id for VNFC %s: %s" % (flavor_dict['name']+str(dev_nb)+"-img", image_id)
                    dev_image_list.append(image_id)
                    dev_nb += 1
            temp_flavor_dict['name'] = flavor_dict['name']
            temp_flavor_dict['description'] = flavor_dict.get('description',None)
            content = mydb.new_row('flavors', temp_flavor_dict, add_uuid=True)
            flavor_mano_id= content
            rollback_list.append({"where":"mano", "what":"flavor","uuid":flavor_mano_id})
    #create flavor at every vim
    if 'uuid' in flavor_dict:
        del flavor_dict['uuid']
    flavor_vim_id=None
    for vim_id,vim in vims.items():
        datacenter_vim_id = vim["config"]["datacenter_tenant_id"]
        # stored as a string: the DB column holds text, not a boolean
        flavor_created="false"
        #look at database
        flavor_db = mydb.get_rows(FROM="datacenters_flavors",
                                  WHERE={'datacenter_vim_id': datacenter_vim_id, 'flavor_id': flavor_mano_id})
        #look at VIM if this flavor exist  SKIPPED
        #res_vim, flavor_vim_id = vim.get_flavor_id_from_path(flavor_dict['location'])
        #if res_vim < 0:
        #    print "Error contacting VIM to know if the flavor %s existed previously." %flavor_vim_id
        #    continue
        #elif res_vim==0:

        # Create the flavor in VIM
        # Translate images at devices from MANO id to VIM id
        disk_list = []
        if 'extended' in flavor_dict and flavor_dict['extended']!=None and "devices" in flavor_dict['extended']:
            # make a copy of original devices
            devices_original=[]

            # strip image-related keys from flavor_dict devices (they go to the image, not the flavor)
            for device in flavor_dict["extended"].get("devices",[]):
                dev={}
                dev.update(device)
                devices_original.append(dev)
                if 'image' in device:
                    del device['image']
                if 'image metadata' in device:
                    del device['image metadata']
                if 'image checksum' in device:
                    del device['image checksum']
            dev_nb = 0
            for index in range(0,len(devices_original)) :
                device=devices_original[index]
                if "image" not in device and "image name" not in device:
                    # if 'size' in device:
                    disk_list.append({'size': device.get('size', default_volume_size), 'name': device.get('name')})
                    continue
                image_dict={}
                image_dict['name']=device.get('image name',flavor_dict['name']+str(dev_nb)+"-img")
                image_dict['universal_name']=device.get('image name')
                image_dict['description']=flavor_dict['name']+str(dev_nb)+"-img"
                image_dict['location']=device.get('image')
                # image_dict['new_location']=device.get('image location')
                image_dict['checksum']=device.get('image checksum')
                image_metadata_dict = device.get('image metadata', None)
                image_metadata_str = None
                if image_metadata_dict != None:
                    image_metadata_str = yaml.safe_dump(image_metadata_dict,default_flow_style=True,width=256)
                image_dict['metadata']=image_metadata_str
                # first ensure the image exists at the NFVO DB, then at this VIM
                image_mano_id=create_or_use_image(mydb, vims, image_dict, rollback_list, only_create_at_vim=False, return_on_error=return_on_error )
                image_dict["uuid"]=image_mano_id
                image_vim_id=create_or_use_image(mydb, vims, image_dict, rollback_list, only_create_at_vim=True, return_on_error=return_on_error)

                #save disk information (image must be based on and size
                disk_list.append({'image_id': image_vim_id, 'size': device.get('size', default_volume_size)})

                flavor_dict["extended"]["devices"][index]['imageRef']=image_vim_id
                dev_nb += 1
        if len(flavor_db)>0:
            #check that this vim_id exist in VIM, if not create
            flavor_vim_id=flavor_db[0]["vim_id"]
            try:
                vim.get_flavor(flavor_vim_id)
                continue #flavor exist
            except vimconn.vimconnException:
                pass
        #create flavor at vim
        logger.debug("nfvo.create_or_use_flavor() adding flavor to VIM %s", vim["name"])
        try:
            # try to reuse an equivalent flavor already present at the VIM
            flavor_vim_id = None
            flavor_vim_id=vim.get_flavor_id_from_data(flavor_dict)
            flavor_created="false"
        except vimconn.vimconnException as e:
            pass
        try:
            if not flavor_vim_id:
                flavor_vim_id = vim.new_flavor(flavor_dict)
                rollback_list.append({"where":"vim", "vim_id": vim_id, "what":"flavor","uuid":flavor_vim_id})
                flavor_created="true"
        except vimconn.vimconnException as e:
            if return_on_error:
                logger.error("Error creating flavor at VIM %s: %s.", vim["name"], str(e))
                raise
            logger.warn("Error creating flavor at VIM %s: %s.", vim["name"], str(e))
            flavor_vim_id = None
            continue
        #if reach here the flavor has been create or exist
        if len(flavor_db)==0:
            #add new vim_id at datacenters_flavors
            extended_devices_yaml = None
            if len(disk_list) > 0:
                extended_devices = dict()
                extended_devices['disks'] = disk_list
                extended_devices_yaml = yaml.safe_dump(extended_devices,default_flow_style=True,width=256)
            mydb.new_row('datacenters_flavors',
                        {'datacenter_vim_id': datacenter_vim_id, 'flavor_id': flavor_mano_id, 'vim_id': flavor_vim_id,
                        'created': flavor_created, 'extended': extended_devices_yaml})
        elif flavor_db[0]["vim_id"]!=flavor_vim_id:
            #modify existing vim_id at datacenters_flavors
            mydb.update_rows('datacenters_flavors', UPDATE={'vim_id':flavor_vim_id},
                             WHERE={'datacenter_vim_id': datacenter_vim_id, 'flavor_id': flavor_mano_id})

    return flavor_vim_id if only_create_at_vim else flavor_mano_id
-
-
def get_str(obj, field, length):
    """Return str(obj[field]) truncated to at most *length* characters.

    :param obj: dictionary-like object to read from
    :param field: key to look up
    :param length: maximum number of characters kept
    :return: the truncated string, or None when the field is absent or None
    """
    raw = obj.get(field)
    if raw is None:
        return None
    return str(raw)[:length]
-
def _lookfor_or_create_image(db_image, mydb, descriptor):
    """Fill image content at the db_image dictionary and look it up at the DB.

    The image is identified either by its absolute path ("location") or by its
    universal name plus optional checksum.

    :param db_image: dictionary filled in place with the image data
    :param mydb: database connector
    :param descriptor: yang descriptor providing "image" and "image-checksum"
    :return: the uuid when the image already exists at the DB; None when a new
        image must be created with the data left at db_image (a fresh uuid is
        pre-assigned there)
    """
    db_image["name"] = get_str(descriptor, "image", 255)
    checksum = get_str(descriptor, "image-checksum", 32)
    # store None rather than an empty string so DB lookups behave consistently
    db_image["checksum"] = checksum if checksum else None
    if db_image["name"].startswith("/"):
        # absolute path: the image is identified by its location
        db_image["location"] = db_image["name"]
        matches = mydb.get_rows(FROM="images", WHERE={'location': db_image["location"]})
    else:
        # otherwise it is identified by universal name (+ optional checksum)
        db_image["universal_name"] = db_image["name"]
        matches = mydb.get_rows(FROM="images", WHERE={'universal_name': db_image['universal_name'],
                                                      'checksum': db_image['checksum']})
    if matches:
        return matches[0]["uuid"]
    # not found: pre-assign a fresh uuid; the caller inserts the new row
    db_image["uuid"] = str(uuid4())
    return None
-
def get_resource_allocation_params(quota_descriptor):
    """Extract the resource allocation properties from a vnfd quota descriptor.

    :param quota_descriptor: cpu/mem/vif/disk-io quota descriptor
    :return: dictionary with the integer "limit", "reserve" and "shares" values
        present (and truthy) in the descriptor
    """
    quota = {}
    for prop in ("limit", "reserve", "shares"):
        raw = quota_descriptor.get(prop)
        if raw:
            quota[prop] = int(raw)
    return quota
-
-def new_vnfd_v3(mydb, tenant_id, vnf_descriptor):
-    """
-    Parses an OSM IM vnfd_catalog and insert at DB
-    :param mydb:
-    :param tenant_id:
-    :param vnf_descriptor:
-    :return: The list of cretated vnf ids
-    """
-    try:
-        myvnfd = vnfd_catalog.vnfd()
-        try:
-            pybindJSONDecoder.load_ietf_json(vnf_descriptor, None, None, obj=myvnfd, path_helper=True,
-                                             skip_unknown=True)
-        except Exception as e:
-            raise NfvoException("Error. Invalid VNF descriptor format " + str(e), httperrors.Bad_Request)
-        db_vnfs = []
-        db_nets = []
-        db_vms = []
-        db_vms_index = 0
-        db_interfaces = []
-        db_images = []
-        db_flavors = []
-        db_ip_profiles_index = 0
-        db_ip_profiles = []
-        uuid_list = []
-        vnfd_uuid_list = []
-        vnfd_catalog_descriptor = vnf_descriptor.get("vnfd:vnfd-catalog")
-        if not vnfd_catalog_descriptor:
-            vnfd_catalog_descriptor = vnf_descriptor.get("vnfd-catalog")
-        vnfd_descriptor_list = vnfd_catalog_descriptor.get("vnfd")
-        if not vnfd_descriptor_list:
-            vnfd_descriptor_list = vnfd_catalog_descriptor.get("vnfd:vnfd")
-        for vnfd_yang in myvnfd.vnfd_catalog.vnfd.itervalues():
-            vnfd = vnfd_yang.get()
-
-            # table vnf
-            vnf_uuid = str(uuid4())
-            uuid_list.append(vnf_uuid)
-            vnfd_uuid_list.append(vnf_uuid)
-            vnfd_id = get_str(vnfd, "id", 255)
-            db_vnf = {
-                "uuid": vnf_uuid,
-                "osm_id": vnfd_id,
-                "name": get_str(vnfd, "name", 255),
-                "description": get_str(vnfd, "description", 255),
-                "tenant_id": tenant_id,
-                "vendor": get_str(vnfd, "vendor", 255),
-                "short_name": get_str(vnfd, "short-name", 255),
-                "descriptor": str(vnf_descriptor)[:60000]
-            }
-
-            for vnfd_descriptor in vnfd_descriptor_list:
-                if vnfd_descriptor["id"] == str(vnfd["id"]):
-                    break
-
-            # table ip_profiles (ip-profiles)
-            ip_profile_name2db_table_index = {}
-            for ip_profile in vnfd.get("ip-profiles").itervalues():
-                db_ip_profile = {
-                    "ip_version": str(ip_profile["ip-profile-params"].get("ip-version", "ipv4")),
-                    "subnet_address": str(ip_profile["ip-profile-params"].get("subnet-address")),
-                    "gateway_address": str(ip_profile["ip-profile-params"].get("gateway-address")),
-                    "dhcp_enabled": str(ip_profile["ip-profile-params"]["dhcp-params"].get("enabled", True)),
-                    "dhcp_start_address": str(ip_profile["ip-profile-params"]["dhcp-params"].get("start-address")),
-                    "dhcp_count": str(ip_profile["ip-profile-params"]["dhcp-params"].get("count")),
-                }
-                dns_list = []
-                for dns in ip_profile["ip-profile-params"]["dns-server"].itervalues():
-                    dns_list.append(str(dns.get("address")))
-                db_ip_profile["dns_address"] = ";".join(dns_list)
-                if ip_profile["ip-profile-params"].get('security-group'):
-                    db_ip_profile["security_group"] = ip_profile["ip-profile-params"]['security-group']
-                ip_profile_name2db_table_index[str(ip_profile["name"])] = db_ip_profiles_index
-                db_ip_profiles_index += 1
-                db_ip_profiles.append(db_ip_profile)
-
-            # table nets (internal-vld)
-            net_id2uuid = {}  # for mapping interface with network
-            for vld in vnfd.get("internal-vld").itervalues():
-                net_uuid = str(uuid4())
-                uuid_list.append(net_uuid)
-                db_net = {
-                    "name": get_str(vld, "name", 255),
-                    "vnf_id": vnf_uuid,
-                    "uuid": net_uuid,
-                    "description": get_str(vld, "description", 255),
-                    "osm_id": get_str(vld, "id", 255),
-                    "type": "bridge",   # TODO adjust depending on connection point type
-                }
-                net_id2uuid[vld.get("id")] = net_uuid
-                db_nets.append(db_net)
-                # ip-profile, link db_ip_profile with db_sce_net
-                if vld.get("ip-profile-ref"):
-                    ip_profile_name = vld.get("ip-profile-ref")
-                    if ip_profile_name not in ip_profile_name2db_table_index:
-                        raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{}]':'vld[{}]':'ip-profile-ref':"
-                                            "'{}'. Reference to a non-existing 'ip_profiles'".format(
-                                                str(vnfd["id"]), str(vld["id"]), str(vld["ip-profile-ref"])),
-                                            httperrors.Bad_Request)
-                    db_ip_profiles[ip_profile_name2db_table_index[ip_profile_name]]["net_id"] = net_uuid
-                else:  #check no ip-address has been defined
-                    for icp in vld.get("internal-connection-point").itervalues():
-                        if icp.get("ip-address"):
-                            raise NfvoException("Error at 'vnfd[{}]':'vld[{}]':'internal-connection-point[{}]' "
-                                            "contains an ip-address but no ip-profile has been defined at VLD".format(
-                                                str(vnfd["id"]), str(vld["id"]), str(icp["id"])),
-                                            httperrors.Bad_Request)
-
-            # connection points vaiable declaration
-            cp_name2iface_uuid = {}
-            cp_name2vdu_id = {}
-            cp_name2vm_uuid = {}
-            cp_name2db_interface = {}
-            vdu_id2cp_name = {}  # stored only when one external connection point is presented at this VDU
-
-            # table vms (vdus)
-            vdu_id2uuid = {}
-            vdu_id2db_table_index = {}
-            mgmt_access = {}
-            for vdu in vnfd.get("vdu").itervalues():
-
-                for vdu_descriptor in vnfd_descriptor["vdu"]:
-                    if vdu_descriptor["id"] == str(vdu["id"]):
-                        break
-                vm_uuid = str(uuid4())
-                uuid_list.append(vm_uuid)
-                vdu_id = get_str(vdu, "id", 255)
-                db_vm = {
-                    "uuid": vm_uuid,
-                    "osm_id": vdu_id,
-                    "name": get_str(vdu, "name", 255),
-                    "description": get_str(vdu, "description", 255),
-                    "pdu_type": get_str(vdu, "pdu-type", 255),
-                    "vnf_id": vnf_uuid,
-                }
-                vdu_id2uuid[db_vm["osm_id"]] = vm_uuid
-                vdu_id2db_table_index[db_vm["osm_id"]] = db_vms_index
-                if vdu.get("count"):
-                    db_vm["count"] = int(vdu["count"])
-
-                # table image
-                image_present = False
-                if vdu.get("image"):
-                    image_present = True
-                    db_image = {}
-                    image_uuid = _lookfor_or_create_image(db_image, mydb, vdu)
-                    if not image_uuid:
-                        image_uuid = db_image["uuid"]
-                        db_images.append(db_image)
-                    db_vm["image_id"] = image_uuid
-                if vdu.get("alternative-images"):
-                    vm_alternative_images = []
-                    for alt_image in vdu.get("alternative-images").itervalues():
-                        db_image = {}
-                        image_uuid = _lookfor_or_create_image(db_image, mydb, alt_image)
-                        if not image_uuid:
-                            image_uuid = db_image["uuid"]
-                            db_images.append(db_image)
-                        vm_alternative_images.append({
-                            "image_id": image_uuid,
-                            "vim_type": str(alt_image["vim-type"]),
-                            # "universal_name": str(alt_image["image"]),
-                            # "checksum": str(alt_image["image-checksum"]) if alt_image.get("image-checksum") else None
-                        })
-
-                    db_vm["image_list"] = yaml.safe_dump(vm_alternative_images, default_flow_style=True, width=256)
-
-                # volumes
-                devices = []
-                if vdu.get("volumes"):
-                    for volume_key in vdu["volumes"]:
-                        volume = vdu["volumes"][volume_key]
-                        if not image_present:
-                            # Convert the first volume to vnfc.image
-                            image_present = True
-                            db_image = {}
-                            image_uuid = _lookfor_or_create_image(db_image, mydb, volume)
-                            if not image_uuid:
-                                image_uuid = db_image["uuid"]
-                                db_images.append(db_image)
-                            db_vm["image_id"] = image_uuid
-                        else:
-                            # Add Openmano devices
-                            device = {"name": str(volume.get("name"))}
-                            device["type"] = str(volume.get("device-type"))
-                            if volume.get("size"):
-                                device["size"] = int(volume["size"])
-                            if volume.get("image"):
-                                device["image name"] = str(volume["image"])
-                                if volume.get("image-checksum"):
-                                    device["image checksum"] = str(volume["image-checksum"])
-
-                            devices.append(device)
-
-                if not db_vm.get("image_id"):
-                    if not db_vm["pdu_type"]:
-                        raise NfvoException("Not defined image for VDU")
-                    # create a fake image
-
-                # cloud-init
-                boot_data = {}
-                if vdu.get("cloud-init"):
-                    boot_data["user-data"] = str(vdu["cloud-init"])
-                elif vdu.get("cloud-init-file"):
-                    # TODO Where this file content is present???
-                    # boot_data["user-data"] = vnfd_yang.files[vdu["cloud-init-file"]]
-                    boot_data["user-data"] = str(vdu["cloud-init-file"])
-
-                if vdu.get("supplemental-boot-data"):
-                    if vdu["supplemental-boot-data"].get('boot-data-drive'):
-                            boot_data['boot-data-drive'] = True
-                    if vdu["supplemental-boot-data"].get('config-file'):
-                        om_cfgfile_list = list()
-                        for custom_config_file in vdu["supplemental-boot-data"]['config-file'].itervalues():
-                            # TODO Where this file content is present???
-                            cfg_source = str(custom_config_file["source"])
-                            om_cfgfile_list.append({"dest": custom_config_file["dest"],
-                                                    "content": cfg_source})
-                        boot_data['config-files'] = om_cfgfile_list
-                if boot_data:
-                    db_vm["boot_data"] = yaml.safe_dump(boot_data, default_flow_style=True, width=256)
-
-                db_vms.append(db_vm)
-                db_vms_index += 1
-
-                # table interfaces (internal/external interfaces)
-                flavor_epa_interfaces = []
-                # for iface in chain(vdu.get("internal-interface").itervalues(), vdu.get("external-interface").itervalues()):
-                for iface in vdu.get("interface").itervalues():
-                    flavor_epa_interface = {}
-                    iface_uuid = str(uuid4())
-                    uuid_list.append(iface_uuid)
-                    db_interface = {
-                        "uuid": iface_uuid,
-                        "internal_name": get_str(iface, "name", 255),
-                        "vm_id": vm_uuid,
-                    }
-                    flavor_epa_interface["name"] = db_interface["internal_name"]
-                    if iface.get("virtual-interface").get("vpci"):
-                        db_interface["vpci"] = get_str(iface.get("virtual-interface"), "vpci", 12)
-                        flavor_epa_interface["vpci"] = db_interface["vpci"]
-
-                    if iface.get("virtual-interface").get("bandwidth"):
-                        bps = int(iface.get("virtual-interface").get("bandwidth"))
-                        db_interface["bw"] = int(math.ceil(bps/1000000.0))
-                        flavor_epa_interface["bandwidth"] = "{} Mbps".format(db_interface["bw"])
-
-                    if iface.get("virtual-interface").get("type") == "OM-MGMT":
-                        db_interface["type"] = "mgmt"
-                    elif iface.get("virtual-interface").get("type") in ("VIRTIO", "E1000", "PARAVIRT"):
-                        db_interface["type"] = "bridge"
-                        db_interface["model"] = get_str(iface.get("virtual-interface"), "type", 12)
-                    elif iface.get("virtual-interface").get("type") in ("SR-IOV", "PCI-PASSTHROUGH"):
-                        db_interface["type"] = "data"
-                        db_interface["model"] = get_str(iface.get("virtual-interface"), "type", 12)
-                        flavor_epa_interface["dedicated"] = "no" if iface["virtual-interface"]["type"] == "SR-IOV" \
-                            else "yes"
-                        flavor_epa_interfaces.append(flavor_epa_interface)
-                    else:
-                        raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{}]':'vdu[{}]':'interface':'virtual"
-                                            "-interface':'type':'{}'. Interface type is not supported".format(
-                                                vnfd_id, vdu_id, iface.get("virtual-interface").get("type")),
-                                            httperrors.Bad_Request)
-
-                    if iface.get("mgmt-interface"):
-                        db_interface["type"] = "mgmt"
-
-                    if iface.get("external-connection-point-ref"):
-                        try:
-                            cp = vnfd.get("connection-point")[iface.get("external-connection-point-ref")]
-                            db_interface["external_name"] = get_str(cp, "name", 255)
-                            cp_name2iface_uuid[db_interface["external_name"]] = iface_uuid
-                            cp_name2vdu_id[db_interface["external_name"]] = vdu_id
-                            cp_name2vm_uuid[db_interface["external_name"]] = vm_uuid
-                            cp_name2db_interface[db_interface["external_name"]] = db_interface
-                            for cp_descriptor in vnfd_descriptor["connection-point"]:
-                                if cp_descriptor["name"] == db_interface["external_name"]:
-                                    break
-                            else:
-                                raise KeyError()
-
-                            if vdu_id in vdu_id2cp_name:
-                                vdu_id2cp_name[vdu_id] = None  # more than two connecdtion point for this VDU
-                            else:
-                                vdu_id2cp_name[vdu_id] = db_interface["external_name"]
-
-                            # port security
-                            if str(cp_descriptor.get("port-security-enabled")).lower() == "false":
-                                db_interface["port_security"] = 0
-                            elif str(cp_descriptor.get("port-security-enabled")).lower() == "true":
-                                db_interface["port_security"] = 1
-                        except KeyError:
-                            raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{vnf}]':'vdu[{vdu}]':"
-                                                "'interface[{iface}]':'vnfd-connection-point-ref':'{cp}' is not present"
-                                                " at connection-point".format(
-                                                    vnf=vnfd_id, vdu=vdu_id, iface=iface["name"],
-                                                    cp=iface.get("vnfd-connection-point-ref")),
-                                                httperrors.Bad_Request)
-                    elif iface.get("internal-connection-point-ref"):
-                        try:
-                            for icp_descriptor in vdu_descriptor["internal-connection-point"]:
-                                if icp_descriptor["id"] == str(iface.get("internal-connection-point-ref")):
-                                    break
-                            else:
-                                raise KeyError("does not exist at vdu:internal-connection-point")
-                            icp = None
-                            icp_vld = None
-                            for vld in vnfd.get("internal-vld").itervalues():
-                                for cp in vld.get("internal-connection-point").itervalues():
-                                    if cp.get("id-ref") == iface.get("internal-connection-point-ref"):
-                                        if icp:
-                                            raise KeyError("is referenced by more than one 'internal-vld'")
-                                        icp = cp
-                                        icp_vld = vld
-                            if not icp:
-                                raise KeyError("is not referenced by any 'internal-vld'")
-
-                            db_interface["net_id"] = net_id2uuid[icp_vld.get("id")]
-                            if str(icp_descriptor.get("port-security-enabled")).lower() == "false":
-                                db_interface["port_security"] = 0
-                            elif str(icp_descriptor.get("port-security-enabled")).lower() == "true":
-                                db_interface["port_security"] = 1
-                            if icp.get("ip-address"):
-                                if not icp_vld.get("ip-profile-ref"):
-                                    raise NfvoException
-                                db_interface["ip_address"] = str(icp.get("ip-address"))
-                        except KeyError as e:
-                            raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{vnf}]':'vdu[{vdu}]':"
-                                                "'interface[{iface}]':'internal-connection-point-ref':'{cp}'"
-                                                " {msg}".format(
-                                                    vnf=vnfd_id, vdu=vdu_id, iface=iface["name"],
-                                                    cp=iface.get("internal-connection-point-ref"), msg=str(e)),
-                                                httperrors.Bad_Request)
-                    if iface.get("position"):
-                        db_interface["created_at"] = int(iface.get("position")) * 50
-                    if iface.get("mac-address"):
-                        db_interface["mac"] = str(iface.get("mac-address"))
-                    db_interfaces.append(db_interface)
-
-                # table flavors
-                db_flavor = {
-                    "name": get_str(vdu, "name", 250) + "-flv",
-                    "vcpus": int(vdu["vm-flavor"].get("vcpu-count", 1)),
-                    "ram": int(vdu["vm-flavor"].get("memory-mb", 1)),
-                    "disk": int(vdu["vm-flavor"].get("storage-gb", 0)),
-                }
-                # TODO revise the case of several numa-node-policy node
-                extended = {}
-                numa = {}
-                if devices:
-                    extended["devices"] = devices
-                if flavor_epa_interfaces:
-                    numa["interfaces"] = flavor_epa_interfaces
-                if vdu.get("guest-epa"):   # TODO or dedicated_int:
-                    epa_vcpu_set = False
-                    if vdu["guest-epa"].get("numa-node-policy"):  # TODO or dedicated_int:
-                        numa_node_policy = vdu["guest-epa"].get("numa-node-policy")
-                        if numa_node_policy.get("node"):
-                            numa_node = numa_node_policy["node"].values()[0]
-                            if numa_node.get("num-cores"):
-                                numa["cores"] = numa_node["num-cores"]
-                                epa_vcpu_set = True
-                            if numa_node.get("paired-threads"):
-                                if numa_node["paired-threads"].get("num-paired-threads"):
-                                    numa["paired-threads"] = int(numa_node["paired-threads"]["num-paired-threads"])
-                                    epa_vcpu_set = True
-                                if len(numa_node["paired-threads"].get("paired-thread-ids")):
-                                    numa["paired-threads-id"] = []
-                                    for pair in numa_node["paired-threads"]["paired-thread-ids"].itervalues():
-                                        numa["paired-threads-id"].append(
-                                            (str(pair["thread-a"]), str(pair["thread-b"]))
-                                        )
-                            if numa_node.get("num-threads"):
-                                numa["threads"] = int(numa_node["num-threads"])
-                                epa_vcpu_set = True
-                            if numa_node.get("memory-mb"):
-                                numa["memory"] = max(int(numa_node["memory-mb"] / 1024), 1)
-                    if vdu["guest-epa"].get("mempage-size"):
-                        if vdu["guest-epa"]["mempage-size"] != "SMALL":
-                            numa["memory"] = max(int(db_flavor["ram"] / 1024), 1)
-                    if vdu["guest-epa"].get("cpu-pinning-policy") and not epa_vcpu_set:
-                        if vdu["guest-epa"]["cpu-pinning-policy"] == "DEDICATED":
-                            if vdu["guest-epa"].get("cpu-thread-pinning-policy") and \
-                                            vdu["guest-epa"]["cpu-thread-pinning-policy"] != "PREFER":
-                                numa["cores"] = max(db_flavor["vcpus"], 1)
-                            else:
-                                numa["threads"] = max(db_flavor["vcpus"], 1)
-                            epa_vcpu_set = True
-                    if vdu["guest-epa"].get("cpu-quota") and not epa_vcpu_set:
-                        cpuquota = get_resource_allocation_params(vdu["guest-epa"].get("cpu-quota"))
-                        if cpuquota:
-                            extended["cpu-quota"] = cpuquota
-                    if vdu["guest-epa"].get("mem-quota"):
-                        vduquota = get_resource_allocation_params(vdu["guest-epa"].get("mem-quota"))
-                        if vduquota:
-                            extended["mem-quota"] = vduquota
-                    if vdu["guest-epa"].get("disk-io-quota"):
-                        diskioquota = get_resource_allocation_params(vdu["guest-epa"].get("disk-io-quota"))
-                        if diskioquota:
-                            extended["disk-io-quota"] = diskioquota
-                    if vdu["guest-epa"].get("vif-quota"):
-                        vifquota = get_resource_allocation_params(vdu["guest-epa"].get("vif-quota"))
-                        if vifquota:
-                            extended["vif-quota"] = vifquota
-                if numa:
-                    extended["numas"] = [numa]
-                if extended:
-                    extended_text = yaml.safe_dump(extended, default_flow_style=True, width=256)
-                    db_flavor["extended"] = extended_text
-                # look if flavor exist
-                temp_flavor_dict = {'disk': db_flavor.get('disk', 0),
-                                    'ram': db_flavor.get('ram'),
-                                    'vcpus': db_flavor.get('vcpus'),
-                                    'extended': db_flavor.get('extended')
-                                    }
-                existing_flavors = mydb.get_rows(FROM="flavors", WHERE=temp_flavor_dict)
-                if existing_flavors:
-                    flavor_uuid = existing_flavors[0]["uuid"]
-                else:
-                    flavor_uuid = str(uuid4())
-                    uuid_list.append(flavor_uuid)
-                    db_flavor["uuid"] = flavor_uuid
-                    db_flavors.append(db_flavor)
-                db_vm["flavor_id"] = flavor_uuid
-
-            # VNF affinity and antiaffinity
-            for pg in vnfd.get("placement-groups").itervalues():
-                pg_name = get_str(pg, "name", 255)
-                for vdu in pg.get("member-vdus").itervalues():
-                    vdu_id = get_str(vdu, "member-vdu-ref", 255)
-                    if vdu_id not in vdu_id2db_table_index:
-                        raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{vnf}]':'placement-groups[{pg}]':"
-                                            "'member-vdus':'{vdu}'. Reference to a non-existing vdu".format(
-                                                vnf=vnfd_id, pg=pg_name, vdu=vdu_id),
-                                            httperrors.Bad_Request)
-                    db_vms[vdu_id2db_table_index[vdu_id]]["availability_zone"] = pg_name
-                    # TODO consider the case of isolation and not colocation
-                    # if pg.get("strategy") == "ISOLATION":
-
-            # VNF mgmt configuration
-            if vnfd["mgmt-interface"].get("vdu-id"):
-                mgmt_vdu_id = get_str(vnfd["mgmt-interface"], "vdu-id", 255)
-                if mgmt_vdu_id not in vdu_id2uuid:
-                    raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{vnf}]':'mgmt-interface':'vdu-id':"
-                                        "'{vdu}'. Reference to a non-existing vdu".format(
-                                            vnf=vnfd_id, vdu=mgmt_vdu_id),
-                                        httperrors.Bad_Request)
-                mgmt_access["vm_id"] = vdu_id2uuid[vnfd["mgmt-interface"]["vdu-id"]]
-                mgmt_access["vdu-id"] = vnfd["mgmt-interface"]["vdu-id"]
-                # if only one cp is defined by this VDU, mark this interface as of type "mgmt"
-                if vdu_id2cp_name.get(mgmt_vdu_id):
-                    if cp_name2db_interface[vdu_id2cp_name[mgmt_vdu_id]]:
-                        cp_name2db_interface[vdu_id2cp_name[mgmt_vdu_id]]["type"] = "mgmt"
-
-            if vnfd["mgmt-interface"].get("ip-address"):
-                mgmt_access["ip-address"] = str(vnfd["mgmt-interface"].get("ip-address"))
-            if vnfd["mgmt-interface"].get("cp"):
-                if vnfd["mgmt-interface"]["cp"] not in cp_name2iface_uuid:
-                    raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{vnf}]':'mgmt-interface':'cp'['{cp}']. "
-                                        "Reference to a non-existing connection-point".format(
-                                            vnf=vnfd_id, cp=vnfd["mgmt-interface"]["cp"]),
-                                        httperrors.Bad_Request)
-                mgmt_access["vm_id"] = cp_name2vm_uuid[vnfd["mgmt-interface"]["cp"]]
-                mgmt_access["interface_id"] = cp_name2iface_uuid[vnfd["mgmt-interface"]["cp"]]
-                mgmt_access["vdu-id"] = cp_name2vdu_id[vnfd["mgmt-interface"]["cp"]]
-                # mark this interface as of type mgmt
-                if cp_name2db_interface[vnfd["mgmt-interface"]["cp"]]:
-                    cp_name2db_interface[vnfd["mgmt-interface"]["cp"]]["type"] = "mgmt"
-
-            default_user = get_str(vnfd.get("vnf-configuration", {}).get("config-access", {}).get("ssh-access", {}),
-                                    "default-user", 64)
-            if default_user:
-                mgmt_access["default_user"] = default_user
-
-            required = get_str(vnfd.get("vnf-configuration", {}).get("config-access", {}).get("ssh-access", {}),
-                                   "required", 6)
-            if required:
-                mgmt_access["required"] = required
-
-            password_ = get_str(vnfd.get("vnf-configuration", {}).get("config-access", {}),
-                                   "password", 64)
-            if password_:
-                mgmt_access["password"] = password_
-
-            if mgmt_access:
-                db_vnf["mgmt_access"] = yaml.safe_dump(mgmt_access, default_flow_style=True, width=256)
-
-            db_vnfs.append(db_vnf)
-        db_tables=[
-            {"vnfs": db_vnfs},
-            {"nets": db_nets},
-            {"images": db_images},
-            {"flavors": db_flavors},
-            {"ip_profiles": db_ip_profiles},
-            {"vms": db_vms},
-            {"interfaces": db_interfaces},
-        ]
-
-        logger.debug("create_vnf Deployment done vnfDict: %s",
-                    yaml.safe_dump(db_tables, indent=4, default_flow_style=False) )
-        mydb.new_rows(db_tables, uuid_list)
-        return vnfd_uuid_list
-    except NfvoException:
-        raise
-    except Exception as e:
-        logger.error("Exception {}".format(e))
-        raise  # NfvoException("Exception {}".format(e), httperrors.Bad_Request)
-
-
-@deprecated("Use new_vnfd_v3")
-def new_vnf(mydb, tenant_id, vnf_descriptor):
-    global global_config
-
-    # Step 1. Check the VNF descriptor
-    check_vnf_descriptor(vnf_descriptor, vnf_descriptor_version=1)
-    # Step 2. Check tenant exist
-    vims = {}
-    if tenant_id != "any":
-        check_tenant(mydb, tenant_id)
-        if "tenant_id" in vnf_descriptor["vnf"]:
-            if vnf_descriptor["vnf"]["tenant_id"] != tenant_id:
-                raise NfvoException("VNF can not have a different tenant owner '{}', must be '{}'".format(vnf_descriptor["vnf"]["tenant_id"], tenant_id),
-                                    httperrors.Unauthorized)
-        else:
-            vnf_descriptor['vnf']['tenant_id'] = tenant_id
-        # Step 3. Get the URL of the VIM from the nfvo_tenant and the datacenter
-        if global_config["auto_push_VNF_to_VIMs"]:
-            vims = get_vim(mydb, tenant_id, ignore_errors=True)
-
-    # Step 4. Review the descriptor and add missing  fields
-    #print vnf_descriptor
-    #logger.debug("Refactoring VNF descriptor with fields: description, public (default: true)")
-    vnf_name = vnf_descriptor['vnf']['name']
-    vnf_descriptor['vnf']['description'] = vnf_descriptor['vnf'].get("description", vnf_name)
-    if "physical" in vnf_descriptor['vnf']:
-        del vnf_descriptor['vnf']['physical']
-    #print vnf_descriptor
-
-    # Step 6. For each VNFC in the descriptor, flavors and images are created in the VIM
-    logger.debug('BEGIN creation of VNF "%s"' % vnf_name)
-    logger.debug("VNF %s: consisting of %d VNFC(s)" % (vnf_name,len(vnf_descriptor['vnf']['VNFC'])))
-
-    #For each VNFC, we add it to the VNFCDict and we  create a flavor.
-    VNFCDict = {}     # Dictionary, key: VNFC name, value: dict with the relevant information to create the VNF and VMs in the MANO database
-    rollback_list = []    # It will contain the new images created in mano. It is used for rollback
-    try:
-        logger.debug("Creating additional disk images and new flavors in the VIM for each VNFC")
-        for vnfc in vnf_descriptor['vnf']['VNFC']:
-            VNFCitem={}
-            VNFCitem["name"] = vnfc['name']
-            VNFCitem["availability_zone"] = vnfc.get('availability_zone')
-            VNFCitem["description"] = vnfc.get("description", 'VM %s of the VNF %s' %(vnfc['name'],vnf_name))
-
-            #print "Flavor name: %s. Description: %s" % (VNFCitem["name"]+"-flv", VNFCitem["description"])
-
-            myflavorDict = {}
-            myflavorDict["name"] = vnfc['name']+"-flv"   #Maybe we could rename the flavor by using the field "image name" if exists
-            myflavorDict["description"] = VNFCitem["description"]
-            myflavorDict["ram"] = vnfc.get("ram", 0)
-            myflavorDict["vcpus"] = vnfc.get("vcpus", 0)
-            myflavorDict["disk"] = vnfc.get("disk", 0)
-            myflavorDict["extended"] = {}
-
-            devices = vnfc.get("devices")
-            if devices != None:
-                myflavorDict["extended"]["devices"] = devices
-
-            # TODO:
-            # Mapping from processor models to rankings should be available somehow in the NFVO. They could be taken from VIM or directly from a new database table
-            # Another option is that the processor in the VNF descriptor specifies directly the ranking of the host
-
-            # Previous code has been commented
-            #if vnfc['processor']['model'] == "Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz" :
-            #    myflavorDict["flavor"]['extended']['processor_ranking'] = 200
-            #elif vnfc['processor']['model'] == "Intel(R) Xeon(R) CPU E5-2697 v2 @ 2.70GHz" :
-            #    myflavorDict["flavor"]['extended']['processor_ranking'] = 300
-            #else:
-            #    result2, message = rollback(myvim, myvimURL, myvim_tenant, flavorList, imageList)
-            #    if result2:
-            #        print "Error creating flavor: unknown processor model. Rollback successful."
-            #        return -httperrors.Bad_Request, "Error creating flavor: unknown processor model. Rollback successful."
-            #    else:
-            #        return -httperrors.Bad_Request, "Error creating flavor: unknown processor model. Rollback fail: you need to access VIM and delete the following %s" % message
-            myflavorDict['extended']['processor_ranking'] = 100  #Hardcoded value, while we decide when the mapping is done
-
-            if 'numas' in vnfc and len(vnfc['numas'])>0:
-                myflavorDict['extended']['numas'] = vnfc['numas']
-
-            #print myflavorDict
-
-            # Step 6.2 New flavors are created in the VIM
-            flavor_id = create_or_use_flavor(mydb, vims, myflavorDict, rollback_list)
-
-            #print "Flavor id for VNFC %s: %s" % (vnfc['name'],flavor_id)
-            VNFCitem["flavor_id"] = flavor_id
-            VNFCDict[vnfc['name']] = VNFCitem
-
-        logger.debug("Creating new images in the VIM for each VNFC")
-        # Step 6.3 New images are created in the VIM
-        #For each VNFC, we must create the appropriate image.
-        #This "for" loop might be integrated with the previous one
-        #In case this integration is made, the VNFCDict might become a VNFClist.
-        for vnfc in vnf_descriptor['vnf']['VNFC']:
-            #print "Image name: %s. Description: %s" % (vnfc['name']+"-img", VNFCDict[vnfc['name']]['description'])
-            image_dict={}
-            image_dict['name']=vnfc.get('image name',vnf_name+"-"+vnfc['name']+"-img")
-            image_dict['universal_name']=vnfc.get('image name')
-            image_dict['description']=vnfc.get('image name', VNFCDict[vnfc['name']]['description'])
-            image_dict['location']=vnfc.get('VNFC image')
-            #image_dict['new_location']=vnfc.get('image location')
-            image_dict['checksum']=vnfc.get('image checksum')
-            image_metadata_dict = vnfc.get('image metadata', None)
-            image_metadata_str = None
-            if image_metadata_dict is not None:
-                image_metadata_str = yaml.safe_dump(image_metadata_dict,default_flow_style=True,width=256)
-            image_dict['metadata']=image_metadata_str
-            #print "create_or_use_image", mydb, vims, image_dict, rollback_list
-            image_id = create_or_use_image(mydb, vims, image_dict, rollback_list)
-            #print "Image id for VNFC %s: %s" % (vnfc['name'],image_id)
-            VNFCDict[vnfc['name']]["image_id"] = image_id
-            VNFCDict[vnfc['name']]["image_path"] = vnfc.get('VNFC image')
-            VNFCDict[vnfc['name']]["count"] = vnfc.get('count', 1)
-            if vnfc.get("boot-data"):
-                VNFCDict[vnfc['name']]["boot_data"] = yaml.safe_dump(vnfc["boot-data"], default_flow_style=True, width=256)
-
-
-        # Step 7. Storing the VNF descriptor in the repository
-        if "descriptor" not in vnf_descriptor["vnf"]:
-            vnf_descriptor["vnf"]["descriptor"] = yaml.safe_dump(vnf_descriptor, indent=4, explicit_start=True, default_flow_style=False)
-
-        # Step 8. Adding the VNF to the NFVO DB
-        vnf_id = mydb.new_vnf_as_a_whole(tenant_id,vnf_name,vnf_descriptor,VNFCDict)
-        return vnf_id
-    except (db_base_Exception, vimconn.vimconnException, KeyError) as e:
-        _, message = rollback(mydb, vims, rollback_list)
-        if isinstance(e, db_base_Exception):
-            error_text = "Exception at database"
-        elif isinstance(e, KeyError):
-            error_text = "KeyError exception "
-            e.http_code = httperrors.Internal_Server_Error
-        else:
-            error_text = "Exception at VIM"
-        error_text += " {} {}. {}".format(type(e).__name__, str(e), message)
-        #logger.error("start_scenario %s", error_text)
-        raise NfvoException(error_text, e.http_code)
-
-
@deprecated("Use new_vnfd_v3")
def new_vnf_v02(mydb, tenant_id, vnf_descriptor):
    """Create a VNF from a version-2 descriptor.

    Validates the descriptor, optionally creates the needed flavors and images
    at the tenant VIMs, and finally stores the whole VNF in the NFVO database.

    :param mydb: NFVO database connection object
    :param tenant_id: NFVO tenant uuid, or "any" to skip the tenant check
    :param vnf_descriptor: VNF descriptor dict (version 2 schema)
    :return: uuid of the new VNF
    :raises NfvoException: on descriptor, database or VIM errors; VIM
        flavors/images created so far are rolled back first
    """
    global global_config

    # Step 1. Check the VNF descriptor
    check_vnf_descriptor(vnf_descriptor, vnf_descriptor_version=2)
    # Step 2. Check tenant exist
    vims = {}
    if tenant_id != "any":
        check_tenant(mydb, tenant_id)
        if "tenant_id" in vnf_descriptor["vnf"]:
            if vnf_descriptor["vnf"]["tenant_id"] != tenant_id:
                raise NfvoException("VNF can not have a different tenant owner '{}', must be '{}'".format(
                    vnf_descriptor["vnf"]["tenant_id"], tenant_id), httperrors.Unauthorized)
        else:
            vnf_descriptor['vnf']['tenant_id'] = tenant_id
        # Step 3. Get the URL of the VIM from the nfvo_tenant and the datacenter
        if global_config["auto_push_VNF_to_VIMs"]:
            vims = get_vim(mydb, tenant_id, ignore_errors=True)

    # Step 4. Review the descriptor and add missing fields
    vnf_name = vnf_descriptor['vnf']['name']
    vnf_descriptor['vnf']['description'] = vnf_descriptor['vnf'].get("description", vnf_name)
    if "physical" in vnf_descriptor['vnf']:
        del vnf_descriptor['vnf']['physical']

    # Step 6. For each VNFC in the descriptor, flavors and images are created in the VIM
    logger.debug('BEGIN creation of VNF "%s"', vnf_name)
    logger.debug("VNF %s: consisting of %d VNFC(s)", vnf_name, len(vnf_descriptor['vnf']['VNFC']))

    # key: VNFC name, value: dict with the relevant information to create the
    # VNF and VMs in the MANO database
    VNFCDict = {}
    rollback_list = []  # images/flavors created at the VIM so far; used for rollback on error
    try:
        logger.debug("Creating additional disk images and new flavors in the VIM for each VNFC")
        for vnfc in vnf_descriptor['vnf']['VNFC']:
            VNFCitem = {}
            VNFCitem["name"] = vnfc['name']
            VNFCitem["description"] = vnfc.get("description", 'VM %s of the VNF %s' % (vnfc['name'], vnf_name))

            myflavorDict = {}
            # Maybe the flavor could be renamed by using the "image name" field if it exists
            myflavorDict["name"] = vnfc['name'] + "-flv"
            myflavorDict["description"] = VNFCitem["description"]
            myflavorDict["ram"] = vnfc.get("ram", 0)
            myflavorDict["vcpus"] = vnfc.get("vcpus", 0)
            myflavorDict["disk"] = vnfc.get("disk", 0)
            myflavorDict["extended"] = {}

            devices = vnfc.get("devices")
            if devices is not None:  # idiom fix: identity comparison against None
                myflavorDict["extended"]["devices"] = devices

            # TODO: the mapping from processor models to rankings should be available
            # somehow in the NFVO (taken from the VIM or from a new database table);
            # alternatively, the descriptor could specify the host ranking directly.
            # Hardcoded value, while we decide how the mapping is done.
            myflavorDict['extended']['processor_ranking'] = 100

            if vnfc.get('numas'):  # present and non-empty
                myflavorDict['extended']['numas'] = vnfc['numas']

            # Step 6.2 New flavors are created in the VIM
            flavor_id = create_or_use_flavor(mydb, vims, myflavorDict, rollback_list)
            VNFCitem["flavor_id"] = flavor_id
            VNFCDict[vnfc['name']] = VNFCitem

        logger.debug("Creating new images in the VIM for each VNFC")
        # Step 6.3 New images are created in the VIM.
        # This loop might be merged with the previous one; in that case VNFCDict
        # might become a VNFClist.
        for vnfc in vnf_descriptor['vnf']['VNFC']:
            image_dict = {
                'name': vnfc.get('image name', vnf_name + "-" + vnfc['name'] + "-img"),
                'universal_name': vnfc.get('image name'),
                'description': vnfc.get('image name', VNFCDict[vnfc['name']]['description']),
                'location': vnfc.get('VNFC image'),
                'checksum': vnfc.get('image checksum'),
                'metadata': None,
            }
            image_metadata_dict = vnfc.get('image metadata')
            if image_metadata_dict is not None:
                image_dict['metadata'] = yaml.safe_dump(image_metadata_dict, default_flow_style=True, width=256)
            image_id = create_or_use_image(mydb, vims, image_dict, rollback_list)
            VNFCDict[vnfc['name']]["image_id"] = image_id
            VNFCDict[vnfc['name']]["image_path"] = vnfc.get('VNFC image')
            VNFCDict[vnfc['name']]["count"] = vnfc.get('count', 1)
            if vnfc.get("boot-data"):
                VNFCDict[vnfc['name']]["boot_data"] = yaml.safe_dump(vnfc["boot-data"], default_flow_style=True,
                                                                     width=256)

        # Step 7. Storing the VNF descriptor in the repository
        if "descriptor" not in vnf_descriptor["vnf"]:
            vnf_descriptor["vnf"]["descriptor"] = yaml.safe_dump(vnf_descriptor, indent=4, explicit_start=True,
                                                                 default_flow_style=False)

        # Step 8. Adding the VNF to the NFVO DB
        vnf_id = mydb.new_vnf_as_a_whole2(tenant_id, vnf_name, vnf_descriptor, VNFCDict)
        return vnf_id
    except (db_base_Exception, vimconn.vimconnException, KeyError) as e:
        # Undo the VIM flavors/images created so far before reporting the error
        _, message = rollback(mydb, vims, rollback_list)
        if isinstance(e, db_base_Exception):
            error_text = "Exception at database"
        elif isinstance(e, KeyError):
            error_text = "KeyError exception "
            # KeyError carries no http_code of its own; report as internal error
            e.http_code = httperrors.Internal_Server_Error
        else:
            error_text = "Exception at VIM"
        error_text += " {} {}. {}".format(type(e).__name__, str(e), message)
        raise NfvoException(error_text, e.http_code)
-
-
def get_vnf_id(mydb, tenant_id, vnf_id):
    """Return the details of a VNF: its VNFCs, internal nets (with ip-profiles)
    and external connections.

    :param mydb: NFVO database connection object
    :param tenant_id: NFVO tenant uuid; "any" widens the search to public VNFs
    :param vnf_id: VNF uuid or name
    :return: dict {'vnf': {...}} with the VNF information
    :raises NfvoException: if the VNF is not found, or if more than one
        ip-profile exists for a net
    """
    # check valid tenant_id
    check_tenant(mydb, tenant_id)
    # obtain data
    where_or = {}
    if tenant_id != "any":
        where_or["tenant_id"] = tenant_id
        where_or["public"] = True
    vnf = mydb.get_table_by_uuid_name('vnfs', vnf_id, "VNF", WHERE_OR=where_or, WHERE_AND_OR="AND")

    vnf_id = vnf["uuid"]
    filter_keys = ('uuid', 'name', 'description', 'public', "tenant_id", "osm_id", "created_at")
    # BUGFIX py3: dict.iteritems() no longer exists; use items()
    filtered_content = {k: v for k, v in vnf.items() if k in filter_keys}
    data = {'vnf': filtered_content}

    # GET VMs
    content = mydb.get_rows(FROM='vnfs join vms on vnfs.uuid=vms.vnf_id',
                            SELECT=('vms.uuid as uuid', 'vms.osm_id as osm_id', 'vms.name as name',
                                    'vms.description as description', 'boot_data'),
                            WHERE={'vnfs.uuid': vnf_id})
    if len(content) != 0:
        # change boot_data (serialized yaml in the DB) into a boot-data dict
        for vm in content:
            if vm.get("boot_data"):
                vm["boot-data"] = yaml.safe_load(vm["boot_data"])
                del vm["boot_data"]
        data['vnf']['VNFC'] = content
    # TODO: GET all the information from a VNFC and include it in the output.

    # GET nets
    content = mydb.get_rows(FROM='vnfs join nets on vnfs.uuid=nets.vnf_id',
                            SELECT=('nets.uuid as uuid', 'nets.name as name', 'nets.description as description',
                                    'nets.type as type', 'nets.multipoint as multipoint'),
                            WHERE={'vnfs.uuid': vnf_id})
    data['vnf']['nets'] = content

    # GET ip-profile for each net
    for net in data['vnf']['nets']:
        ipprofiles = mydb.get_rows(FROM='ip_profiles',
                                   SELECT=('ip_version', 'subnet_address', 'gateway_address', 'dns_address',
                                           'dhcp_enabled', 'dhcp_start_address', 'dhcp_count'),
                                   WHERE={'net_id': net["uuid"]})
        if len(ipprofiles) == 1:
            net["ip_profile"] = ipprofiles[0]
        elif len(ipprofiles) > 1:
            raise NfvoException("More than one ip-profile found with this criteria: net_id='{}'".format(net['uuid']),
                                httperrors.Bad_Request)

    # TODO: For each net, GET its elements and relevant info per element (VNFC, iface, ip_address)
    # and include them in the output.

    # GET external interfaces
    content = mydb.get_rows(FROM='vnfs join vms on vnfs.uuid=vms.vnf_id join interfaces on vms.uuid=interfaces.vm_id',
                            SELECT=('interfaces.uuid as uuid', 'interfaces.external_name as external_name',
                                    'vms.name as vm_name', 'interfaces.vm_id as vm_id',
                                    'interfaces.internal_name as internal_name', 'interfaces.type as type',
                                    'interfaces.vpci as vpci', 'interfaces.bw as bw'),
                            WHERE={'vnfs.uuid': vnf_id, 'interfaces.external_name<>': None})
    data['vnf']['external-connections'] = content

    return data
-
-
def delete_vnf(mydb,tenant_id,vnf_id,datacenter=None,vim_tenant=None):
    """Delete a VNF from the NFVO database, removing its flavors and images
    from the VIMs when they are not used by another VNF.

    :param mydb: NFVO database connection object
    :param tenant_id: NFVO tenant uuid, or "any" to skip the tenant check
    :param vnf_id: VNF uuid or name
    :param datacenter: not referenced in this body  # NOTE(review): confirm it is kept only for API compatibility
    :param vim_tenant: not referenced in this body  # NOTE(review): confirm it is kept only for API compatibility
    :return: "<uuid> <name>" of the deleted VNF
    :raises NfvoException: if the VNF is not found
    """
    # Check tenant exist
    if tenant_id != "any":
        check_tenant(mydb, tenant_id)
        # Get the URL of the VIM from the nfvo_tenant and the datacenter
        vims = get_vim(mydb, tenant_id, ignore_errors=True)
    else:
        vims={}

    # Checking if it is a valid uuid and, if not, getting the uuid assuming that the name was provided"
    where_or = {}
    if tenant_id != "any":
        where_or["tenant_id"] = tenant_id
        where_or["public"] = True
    vnf = mydb.get_table_by_uuid_name('vnfs', vnf_id, "VNF", WHERE_OR=where_or, WHERE_AND_OR="AND")
    vnf_id = vnf["uuid"]

    # "Getting the list of flavors and tenants of the VNF"
    flavorList = get_flavorlist(mydb, vnf_id)
    if len(flavorList)==0:
        logger.warn("delete_vnf error. No flavors found for the VNF id '%s'", vnf_id)

    imageList = get_imagelist(mydb, vnf_id)
    if len(imageList)==0:
        logger.warn( "delete_vnf error. No images found for the VNF id '%s'", vnf_id)

    # Delete the VNF row first; flavors/images are cleaned up afterwards
    deleted = mydb.delete_row_by_id('vnfs', vnf_id)
    if deleted == 0:
        raise NfvoException("vnf '{}' not found".format(vnf_id), httperrors.Not_Found)

    # Collects flavors/images that could not be removed; currently only logged,
    # not returned (see the commented-out return at the end)
    undeletedItems = []
    for flavor in flavorList:
        #check if flavor is used by other vnf
        try:
            c = mydb.get_rows(FROM='vms', WHERE={'flavor_id':flavor} )
            if len(c) > 0:
                logger.debug("Flavor '%s' not deleted because it is being used by another VNF", flavor)
                continue
            #flavor not used, must be deleted
            #delelte at VIM
            c = mydb.get_rows(FROM='datacenters_flavors', WHERE={'flavor_id': flavor})
            for flavor_vim in c:
                if not flavor_vim['created']:  # skip this flavor because not created by openmano
                    continue
                # look for vim
                myvim = None
                for vim in vims.values():
                    if vim["config"]["datacenter_tenant_id"] == flavor_vim["datacenter_vim_id"]:
                        myvim = vim
                        break
                if not myvim:
                    continue
                try:
                    myvim.delete_flavor(flavor_vim["vim_id"])
                except vimconn.vimconnNotFoundException:
                    # already gone at the VIM: log and keep going
                    logger.warn("VIM flavor %s not exist at datacenter %s", flavor_vim["vim_id"],
                                flavor_vim["datacenter_vim_id"] )
                except vimconn.vimconnException as e:
                    logger.error("Not possible to delete VIM flavor %s from datacenter %s: %s %s",
                            flavor_vim["vim_id"], flavor_vim["datacenter_vim_id"], type(e).__name__, str(e))
                    undeletedItems.append("flavor {} from VIM {}".format(flavor_vim["vim_id"],
                                                                         flavor_vim["datacenter_vim_id"]))
            # delete flavor from Database, using table flavors and with cascade foreign key also at datacenters_flavors
            mydb.delete_row_by_id('flavors', flavor)
        except db_base_Exception as e:
            logger.error("delete_vnf_error. Not possible to get flavor details and delete '%s'. %s", flavor, str(e))
            undeletedItems.append("flavor {}".format(flavor))


    for image in imageList:
        try:
            #check if image is used by other vnf
            c = mydb.get_rows(FROM='vms', WHERE=[{'image_id': image}, {'image_list LIKE ': '%' + image + '%'}])
            if len(c) > 0:
                logger.debug("Image '%s' not deleted because it is being used by another VNF", image)
                continue
            #image not used, must be deleted
            #delelte at VIM
            c = mydb.get_rows(FROM='datacenters_images', WHERE={'image_id':image})
            for image_vim in c:
                if image_vim["datacenter_vim_id"] not in vims:   # TODO change to datacenter_tenant_id
                    continue
                # NOTE(review): 'created' is compared as the string 'false' here but as a
                # boolean in the flavor loop above — confirm the actual DB column type
                if image_vim['created']=='false': #skip this image because not created by openmano
                    continue
                # NOTE(review): membership was tested on "datacenter_vim_id" but the lookup
                # uses "datacenter_id" — confirm these keys agree, otherwise KeyError is possible
                myvim=vims[ image_vim["datacenter_id"] ]
                try:
                    myvim.delete_image(image_vim["vim_id"])
                except vimconn.vimconnNotFoundException as e:
                    logger.warn("VIM image %s not exist at datacenter %s", image_vim["vim_id"], image_vim["datacenter_id"] )
                except vimconn.vimconnException as e:
                    logger.error("Not possible to delete VIM image %s from datacenter %s: %s %s",
                            image_vim["vim_id"], image_vim["datacenter_id"], type(e).__name__, str(e))
                    undeletedItems.append("image {} from VIM {}".format(image_vim["vim_id"], image_vim["datacenter_id"] ))
            #delete image from Database, using table images and with cascade foreign key also at datacenters_images
            mydb.delete_row_by_id('images', image)
        except db_base_Exception as e:
            logger.error("delete_vnf_error. Not possible to get image details and delete '%s'. %s", image, str(e))
            undeletedItems.append("image %s" % image)

    return vnf_id + " " + vnf["name"]
    #if undeletedItems:
    #    return "delete_vnf. Undeleted: %s" %(undeletedItems)
-
-
@deprecated("Not used")
def get_hosts_info(mydb, nfvo_tenant_id, datacenter_name=None):
    """Return the host topology of a datacenter (legacy result-code API).

    :param mydb: NFVO database connection object
    :param nfvo_tenant_id: NFVO tenant uuid
    :param datacenter_name: optional datacenter name filter
    :return: (result, topology) tuple; a negative result signals an error and
        the second element carries the error detail
    """
    result, vims = get_vim(mydb, nfvo_tenant_id, None, datacenter_name)
    if result < 0:
        return result, vims
    elif result == 0:
        return -httperrors.Not_Found, "datacenter '%s' not found" % datacenter_name
    # BUGFIX py3: dict.values() is a view and cannot be indexed; take the first item
    myvim = next(iter(vims.values()))
    result, servers = myvim.get_hosts_info()
    if result < 0:
        return result, servers
    topology = {'name': myvim['name'], 'servers': servers}
    return result, topology
-
-
def get_hosts(mydb, nfvo_tenant_id):
    """Return the datacenter hosts and the VMs running on each of them.

    :param mydb: NFVO database connection object
    :param nfvo_tenant_id: NFVO tenant uuid
    :return: dict {'Datacenters': [{'name': ..., 'servers': [...]}]}
    :raises NfvoException: if zero or several datacenters match, or on VIM errors
    """
    vims = get_vim(mydb, nfvo_tenant_id)
    if len(vims) == 0:
        raise NfvoException("No datacenter found for tenant '{}'".format(str(nfvo_tenant_id)), httperrors.Not_Found)
    elif len(vims) > 1:
        raise NfvoException("More than one datacenters found, try to identify with uuid", httperrors.Conflict)
    # BUGFIX py3: dict.values() is a view and cannot be indexed; take the first item
    myvim = next(iter(vims.values()))
    try:
        hosts = myvim.get_hosts()
        logger.debug('VIM hosts response: ' + yaml.safe_dump(hosts, indent=4, default_flow_style=False))

        datacenter = {'Datacenters': [{'name': myvim['name'], 'servers': []}]}
        for host in hosts:
            server = {'name': host['name'], 'vms': []}
            for vm in host['instances']:
                # get internal name and model from the NFVO database
                try:
                    c = mydb.get_rows(SELECT=('name',), FROM='instance_vms as iv join vms on iv.vm_id=vms.uuid',
                                      WHERE={'vim_vm_id': vm['id']})
                    if len(c) == 0:
                        logger.warn("nfvo.get_hosts virtual machine at VIM '{}' not found at tidnfvo".format(vm['id']))
                        continue
                    server['vms'].append({'name': vm['name'], 'model': c[0]['name']})
                except db_base_Exception as e:
                    # best effort: skip this VM but keep collecting the rest
                    logger.warn("nfvo.get_hosts virtual machine at VIM '{}' error {}".format(vm['id'], str(e)))
            datacenter['Datacenters'][0]['servers'].append(server)
        return datacenter
    except vimconn.vimconnException as e:
        raise NfvoException("Not possible to get_host_list from VIM: {}".format(str(e)), e.http_code)
-
-
-@deprecated("Use new_nsd_v3")
-def new_scenario(mydb, tenant_id, topo):
-
-#    result, vims = get_vim(mydb, tenant_id)
-#    if result < 0:
-#        return result, vims
-#1: parse input
-    if tenant_id != "any":
-        check_tenant(mydb, tenant_id)
-        if "tenant_id" in topo:
-            if topo["tenant_id"] != tenant_id:
-                raise NfvoException("VNF can not have a different tenant owner '{}', must be '{}'".format(topo["tenant_id"], tenant_id),
-                                    httperrors.Unauthorized)
-    else:
-        tenant_id=None
-
-#1.1: get VNFs and external_networks (other_nets).
-    vnfs={}
-    other_nets={}  #external_networks, bridge_networks and data_networkds
-    nodes = topo['topology']['nodes']
-    for k in nodes.keys():
-        if nodes[k]['type'] == 'VNF':
-            vnfs[k] = nodes[k]
-            vnfs[k]['ifaces'] = {}
-        elif nodes[k]['type'] == 'other_network' or nodes[k]['type'] == 'external_network':
-            other_nets[k] = nodes[k]
-            other_nets[k]['external']=True
-        elif nodes[k]['type'] == 'network':
-            other_nets[k] = nodes[k]
-            other_nets[k]['external']=False
-
-
-#1.2: Check that VNF are present at database table vnfs. Insert uuid, description and external interfaces
-    for name,vnf in vnfs.items():
-        where = {"OR": {"tenant_id": tenant_id, 'public': "true"}}
-        error_text = ""
-        error_pos = "'topology':'nodes':'" + name + "'"
-        if 'vnf_id' in vnf:
-            error_text += " 'vnf_id' " +  vnf['vnf_id']
-            where['uuid'] = vnf['vnf_id']
-        if 'VNF model' in vnf:
-            error_text += " 'VNF model' " +  vnf['VNF model']
-            where['name'] = vnf['VNF model']
-        if len(where) == 1:
-            raise NfvoException("Descriptor need a 'vnf_id' or 'VNF model' field at " + error_pos, httperrors.Bad_Request)
-
-        vnf_db = mydb.get_rows(SELECT=('uuid','name','description'),
-                               FROM='vnfs',
-                               WHERE=where)
-        if len(vnf_db)==0:
-            raise NfvoException("unknown" + error_text + " at " + error_pos, httperrors.Not_Found)
-        elif len(vnf_db)>1:
-            raise NfvoException("more than one" + error_text + " at " + error_pos + " Concrete with 'vnf_id'", httperrors.Conflict)
-        vnf['uuid']=vnf_db[0]['uuid']
-        vnf['description']=vnf_db[0]['description']
-        #get external interfaces
-        ext_ifaces = mydb.get_rows(SELECT=('external_name as name','i.uuid as iface_uuid', 'i.type as type'),
-            FROM='vnfs join vms on vnfs.uuid=vms.vnf_id join interfaces as i on vms.uuid=i.vm_id',
-            WHERE={'vnfs.uuid':vnf['uuid'], 'external_name<>': None} )
-        for ext_iface in ext_ifaces:
-            vnf['ifaces'][ ext_iface['name'] ] = {'uuid':ext_iface['iface_uuid'], 'type':ext_iface['type']}
-
-#1.4 get list of connections
-    conections = topo['topology']['connections']
-    conections_list = []
-    conections_list_name = []
-    for k in conections.keys():
-        if type(conections[k]['nodes'])==dict: #dict with node:iface pairs
-            ifaces_list = conections[k]['nodes'].items()
-        elif type(conections[k]['nodes'])==list: #list with dictionary
-            ifaces_list=[]
-            conection_pair_list = map(lambda x: x.items(), conections[k]['nodes'] )
-            for k2 in conection_pair_list:
-                ifaces_list += k2
-
-        con_type = conections[k].get("type", "link")
-        if con_type != "link":
-            if k in other_nets:
-                raise NfvoException("Format error. Reapeted network name at 'topology':'connections':'{}'".format(str(k)), httperrors.Bad_Request)
-            other_nets[k] = {'external': False}
-            if conections[k].get("graph"):
-                other_nets[k]["graph"] =   conections[k]["graph"]
-            ifaces_list.append( (k, None) )
-
-
-        if con_type == "external_network":
-            other_nets[k]['external'] = True
-            if conections[k].get("model"):
-                other_nets[k]["model"] =   conections[k]["model"]
-            else:
-                other_nets[k]["model"] =   k
-        if con_type == "dataplane_net" or con_type == "bridge_net":
-            other_nets[k]["model"] = con_type
-
-        conections_list_name.append(k)
-        conections_list.append(set(ifaces_list)) #from list to set to operate as a set (this conversion removes elements that are repeated in a list)
-        #print set(ifaces_list)
-    #check valid VNF and iface names
-        for iface in ifaces_list:
-            if iface[0] not in vnfs and iface[0] not in other_nets :
-                raise NfvoException("format error. Invalid VNF name at 'topology':'connections':'{}':'nodes':'{}'".format(
-                                                                                        str(k), iface[0]), httperrors.Not_Found)
-            if iface[0] in vnfs and iface[1] not in vnfs[ iface[0] ]['ifaces']:
-                raise NfvoException("format error. Invalid interface name at 'topology':'connections':'{}':'nodes':'{}':'{}'".format(
-                                                                                        str(k), iface[0], iface[1]), httperrors.Not_Found)
-
-#1.5 unify connections from the pair list to a consolidated list
-    index=0
-    while index < len(conections_list):
-        index2 = index+1
-        while index2 < len(conections_list):
-            if len(conections_list[index] & conections_list[index2])>0: #common interface, join nets
-                conections_list[index] |= conections_list[index2]
-                del conections_list[index2]
-                del conections_list_name[index2]
-            else:
-                index2 += 1
-        conections_list[index] = list(conections_list[index])  # from set to list again
-        index += 1
-    #for k in conections_list:
-    #    print k
-
-
-
-#1.6 Delete non external nets
-#    for k in other_nets.keys():
-#        if other_nets[k]['model']=='bridge' or other_nets[k]['model']=='dataplane_net' or other_nets[k]['model']=='bridge_net':
-#            for con in conections_list:
-#                delete_indexes=[]
-#                for index in range(0,len(con)):
-#                    if con[index][0] == k: delete_indexes.insert(0,index) #order from higher to lower
-#                for index in delete_indexes:
-#                    del con[index]
-#            del other_nets[k]
-#1.7: Check external_ports are present at database table datacenter_nets
-    for k,net in other_nets.items():
-        error_pos = "'topology':'nodes':'" + k + "'"
-        if net['external']==False:
-            if 'name' not in net:
-                net['name']=k
-            if 'model' not in net:
-                raise NfvoException("needed a 'model' at " + error_pos, httperrors.Bad_Request)
-            if net['model']=='bridge_net':
-                net['type']='bridge';
-            elif net['model']=='dataplane_net':
-                net['type']='data';
-            else:
-                raise NfvoException("unknown 'model' '"+ net['model'] +"' at " + error_pos, httperrors.Not_Found)
-        else: #external
-#IF we do not want to check that external network exist at datacenter
-            pass
-#ELSE
-#             error_text = ""
-#             WHERE_={}
-#             if 'net_id' in net:
-#                 error_text += " 'net_id' " +  net['net_id']
-#                 WHERE_['uuid'] = net['net_id']
-#             if 'model' in net:
-#                 error_text += " 'model' " +  net['model']
-#                 WHERE_['name'] = net['model']
-#             if len(WHERE_) == 0:
-#                 return -httperrors.Bad_Request, "needed a 'net_id' or 'model' at " + error_pos
-#             r,net_db = mydb.get_table(SELECT=('uuid','name','description','type','shared'),
-#                 FROM='datacenter_nets', WHERE=WHERE_ )
-#             if r<0:
-#                 print "nfvo.new_scenario Error getting datacenter_nets",r,net_db
-#             elif r==0:
-#                 print "nfvo.new_scenario Error" +error_text+ " is not present at database"
-#                 return -httperrors.Bad_Request, "unknown " +error_text+ " at " + error_pos
-#             elif r>1:
-#                 print "nfvo.new_scenario Error more than one external_network for " +error_text+ " is present at database"
-#                 return -httperrors.Bad_Request, "more than one external_network for " +error_text+ "at "+ error_pos + " Concrete with 'net_id'"
-#             other_nets[k].update(net_db[0])
-#ENDIF
-    net_list={}
-    net_nb=0  #Number of nets
-    for con in conections_list:
-        #check if this is connected to a external net
-        other_net_index=-1
-        #print
-        #print "con", con
-        for index in range(0,len(con)):
-            #check if this is connected to a external net
-            for net_key in other_nets.keys():
-                if con[index][0]==net_key:
-                    if other_net_index>=0:
-                        error_text="There is some interface connected both to net '%s' and net '%s'" % (con[other_net_index][0], net_key)
-                        #print "nfvo.new_scenario " + error_text
-                        raise NfvoException(error_text, httperrors.Bad_Request)
-                    else:
-                        other_net_index = index
-                        net_target = net_key
-                    break
-        #print "other_net_index", other_net_index
-        try:
-            if other_net_index>=0:
-                del con[other_net_index]
-#IF we do not want to check that external network exist at datacenter
-                if other_nets[net_target]['external'] :
-                    if "name" not in other_nets[net_target]:
-                        other_nets[net_target]['name'] =  other_nets[net_target]['model']
-                    if other_nets[net_target]["type"] == "external_network":
-                        if vnfs[ con[0][0] ]['ifaces'][ con[0][1] ]["type"] == "data":
-                            other_nets[net_target]["type"] =  "data"
-                        else:
-                            other_nets[net_target]["type"] =  "bridge"
-#ELSE
-#                 if other_nets[net_target]['external'] :
-#                     type_='data' if len(con)>1 else 'ptp'  #an external net is connected to a external port, so it is ptp if only one connection is done to this net
-#                     if type_=='data' and other_nets[net_target]['type']=="ptp":
-#                         error_text = "Error connecting %d nodes on a not multipoint net %s" % (len(con), net_target)
-#                         print "nfvo.new_scenario " + error_text
-#                         return -httperrors.Bad_Request, error_text
-#ENDIF
-                for iface in con:
-                    vnfs[ iface[0] ]['ifaces'][ iface[1] ]['net_key'] = net_target
-            else:
-                #create a net
-                net_type_bridge=False
-                net_type_data=False
-                net_target = "__-__net"+str(net_nb)
-                net_list[net_target] = {'name': conections_list_name[net_nb],  #"net-"+str(net_nb),
-                    'description':"net-%s in scenario %s" %(net_nb,topo['name']),
-                    'external':False}
-                for iface in con:
-                    vnfs[ iface[0] ]['ifaces'][ iface[1] ]['net_key'] = net_target
-                    iface_type = vnfs[ iface[0] ]['ifaces'][ iface[1] ]['type']
-                    if iface_type=='mgmt' or iface_type=='bridge':
-                        net_type_bridge = True
-                    else:
-                        net_type_data = True
-                if net_type_bridge and net_type_data:
-                    error_text = "Error connection interfaces of bridge type with data type. Firs node %s, iface %s" % (iface[0], iface[1])
-                    #print "nfvo.new_scenario " + error_text
-                    raise NfvoException(error_text, httperrors.Bad_Request)
-                elif net_type_bridge:
-                    type_='bridge'
-                else:
-                    type_='data' if len(con)>2 else 'ptp'
-                net_list[net_target]['type'] = type_
-                net_nb+=1
-        except Exception:
-            error_text = "Error connection node %s : %s does not match any VNF or interface" % (iface[0], iface[1])
-            #print "nfvo.new_scenario " + error_text
-            #raise e
-            raise NfvoException(error_text, httperrors.Bad_Request)
-
-#1.8: Connect to management net all not already connected interfaces of type 'mgmt'
-    #1.8.1 obtain management net
-    mgmt_net = mydb.get_rows(SELECT=('uuid','name','description','type','shared'),
-        FROM='datacenter_nets', WHERE={'name':'mgmt'} )
-    #1.8.2 check all interfaces from all vnfs
-    if len(mgmt_net)>0:
-        add_mgmt_net = False
-        for vnf in vnfs.values():
-            for iface in vnf['ifaces'].values():
-                if iface['type']=='mgmt' and 'net_key' not in iface:
-                    #iface not connected
-                    iface['net_key'] = 'mgmt'
-                    add_mgmt_net = True
-        if add_mgmt_net and 'mgmt' not in net_list:
-            net_list['mgmt']=mgmt_net[0]
-            net_list['mgmt']['external']=True
-            net_list['mgmt']['graph']={'visible':False}
-
-    net_list.update(other_nets)
-    #print
-    #print 'net_list', net_list
-    #print
-    #print 'vnfs', vnfs
-    #print
-
-#2: insert scenario. filling tables scenarios,sce_vnfs,sce_interfaces,sce_nets
-    c = mydb.new_scenario( { 'vnfs':vnfs, 'nets':net_list,
-        'tenant_id':tenant_id, 'name':topo['name'],
-         'description':topo.get('description',topo['name']),
-         'public': topo.get('public', False)
-         })
-
-    return c
-
-
-@deprecated("Use new_nsd_v3")
-def new_scenario_v02(mydb, tenant_id, scenario_dict, version):
-    """ This creates a new scenario for version 0.2 and 0.3"""
-    scenario = scenario_dict["scenario"]
-    if tenant_id != "any":
-        check_tenant(mydb, tenant_id)
-        if "tenant_id" in scenario:
-            if scenario["tenant_id"] != tenant_id:
-                # print "nfvo.new_scenario_v02() tenant '%s' not found" % tenant_id
-                raise NfvoException("VNF can not have a different tenant owner '{}', must be '{}'".format(
-                                                    scenario["tenant_id"], tenant_id), httperrors.Unauthorized)
-    else:
-        tenant_id=None
-
-    # 1: Check that VNF are present at database table vnfs and update content into scenario dict
-    for name,vnf in scenario["vnfs"].iteritems():
-        where = {"OR": {"tenant_id": tenant_id, 'public': "true"}}
-        error_text = ""
-        error_pos = "'scenario':'vnfs':'" + name + "'"
-        if 'vnf_id' in vnf:
-            error_text += " 'vnf_id' " + vnf['vnf_id']
-            where['uuid'] = vnf['vnf_id']
-        if 'vnf_name' in vnf:
-            error_text += " 'vnf_name' " + vnf['vnf_name']
-            where['name'] = vnf['vnf_name']
-        if len(where) == 1:
-            raise NfvoException("Needed a 'vnf_id' or 'vnf_name' at " + error_pos, httperrors.Bad_Request)
-        vnf_db = mydb.get_rows(SELECT=('uuid', 'name', 'description'),
-                               FROM='vnfs',
-                               WHERE=where)
-        if len(vnf_db) == 0:
-            raise NfvoException("Unknown" + error_text + " at " + error_pos, httperrors.Not_Found)
-        elif len(vnf_db) > 1:
-            raise NfvoException("More than one" + error_text + " at " + error_pos + " Concrete with 'vnf_id'", httperrors.Conflict)
-        vnf['uuid'] = vnf_db[0]['uuid']
-        vnf['description'] = vnf_db[0]['description']
-        vnf['ifaces'] = {}
-        # get external interfaces
-        ext_ifaces = mydb.get_rows(SELECT=('external_name as name', 'i.uuid as iface_uuid', 'i.type as type'),
-                                   FROM='vnfs join vms on vnfs.uuid=vms.vnf_id join interfaces as i on vms.uuid=i.vm_id',
-                                   WHERE={'vnfs.uuid':vnf['uuid'], 'external_name<>': None} )
-        for ext_iface in ext_ifaces:
-            vnf['ifaces'][ ext_iface['name'] ] = {'uuid':ext_iface['iface_uuid'], 'type': ext_iface['type']}
-        # TODO? get internal-connections from db.nets and their profiles, and update scenario[vnfs][internal-connections] accordingly
-
-    # 2: Insert net_key and ip_address at every vnf interface
-    for net_name, net in scenario["networks"].items():
-        net_type_bridge = False
-        net_type_data = False
-        for iface_dict in net["interfaces"]:
-            if version == "0.2":
-                temp_dict = iface_dict
-                ip_address = None
-            elif version == "0.3":
-                temp_dict = {iface_dict["vnf"] : iface_dict["vnf_interface"]}
-                ip_address = iface_dict.get('ip_address', None)
-            for vnf, iface in temp_dict.items():
-                if vnf not in scenario["vnfs"]:
-                    error_text = "Error at 'networks':'{}':'interfaces' VNF '{}' not match any VNF at 'vnfs'".format(
-                        net_name, vnf)
-                    # logger.debug("nfvo.new_scenario_v02 " + error_text)
-                    raise NfvoException(error_text, httperrors.Not_Found)
-                if iface not in scenario["vnfs"][vnf]['ifaces']:
-                    error_text = "Error at 'networks':'{}':'interfaces':'{}' interface not match any VNF interface"\
-                        .format(net_name, iface)
-                    # logger.debug("nfvo.new_scenario_v02 " + error_text)
-                    raise NfvoException(error_text, httperrors.Bad_Request)
-                if "net_key" in scenario["vnfs"][vnf]['ifaces'][iface]:
-                    error_text = "Error at 'networks':'{}':'interfaces':'{}' interface already connected at network"\
-                                 "'{}'".format(net_name, iface,scenario["vnfs"][vnf]['ifaces'][iface]['net_key'])
-                    # logger.debug("nfvo.new_scenario_v02 " + error_text)
-                    raise NfvoException(error_text, httperrors.Bad_Request)
-                scenario["vnfs"][vnf]['ifaces'][ iface ]['net_key'] = net_name
-                scenario["vnfs"][vnf]['ifaces'][iface]['ip_address'] = ip_address
-                iface_type = scenario["vnfs"][vnf]['ifaces'][iface]['type']
-                if iface_type == 'mgmt' or iface_type == 'bridge':
-                    net_type_bridge = True
-                else:
-                    net_type_data = True
-
-        if net_type_bridge and net_type_data:
-            error_text = "Error connection interfaces of 'bridge' type and 'data' type at 'networks':'{}':'interfaces'"\
-                .format(net_name)
-            # logger.debug("nfvo.new_scenario " + error_text)
-            raise NfvoException(error_text, httperrors.Bad_Request)
-        elif net_type_bridge:
-            type_ = 'bridge'
-        else:
-            type_ = 'data' if len(net["interfaces"]) > 2 else 'ptp'
-
-        if net.get("implementation"):     # for v0.3
-            if type_ == "bridge" and net["implementation"] == "underlay":
-                error_text = "Error connecting interfaces of data type to a network declared as 'underlay' at "\
-                             "'network':'{}'".format(net_name)
-                # logger.debug(error_text)
-                raise NfvoException(error_text, httperrors.Bad_Request)
-            elif type_ != "bridge" and net["implementation"] == "overlay":
-                error_text = "Error connecting interfaces of data type to a network declared as 'overlay' at "\
-                             "'network':'{}'".format(net_name)
-                # logger.debug(error_text)
-                raise NfvoException(error_text, httperrors.Bad_Request)
-            net.pop("implementation")
-        if "type" in net and version == "0.3":   # for v0.3
-            if type_ == "data" and net["type"] == "e-line":
-                error_text = "Error connecting more than 2 interfaces of data type to a network declared as type "\
-                             "'e-line' at 'network':'{}'".format(net_name)
-                # logger.debug(error_text)
-                raise NfvoException(error_text, httperrors.Bad_Request)
-            elif type_ == "ptp" and net["type"] == "e-lan":
-                type_ = "data"
-
-        net['type'] = type_
-        net['name'] = net_name
-        net['external'] = net.get('external', False)
-
-    # 3: insert at database
-    scenario["nets"] = scenario["networks"]
-    scenario['tenant_id'] = tenant_id
-    scenario_id = mydb.new_scenario(scenario)
-    return scenario_id
-
-
-def new_nsd_v3(mydb, tenant_id, nsd_descriptor):
-    """
-    Parses an OSM IM nsd_catalog and insert at DB
-    :param mydb:
-    :param tenant_id:
-    :param nsd_descriptor:
-    :return: The list of created NSD ids
-    """
-    try:
-        mynsd = nsd_catalog.nsd()
-        try:
-            pybindJSONDecoder.load_ietf_json(nsd_descriptor, None, None, obj=mynsd, skip_unknown=True)
-        except Exception as e:
-            raise NfvoException("Error. Invalid NS descriptor format: " + str(e), httperrors.Bad_Request)
-        db_scenarios = []
-        db_sce_nets = []
-        db_sce_vnfs = []
-        db_sce_interfaces = []
-        db_sce_vnffgs = []
-        db_sce_rsps = []
-        db_sce_rsp_hops = []
-        db_sce_classifiers = []
-        db_sce_classifier_matches = []
-        db_ip_profiles = []
-        db_ip_profiles_index = 0
-        uuid_list = []
-        nsd_uuid_list = []
-        for nsd_yang in mynsd.nsd_catalog.nsd.itervalues():
-            nsd = nsd_yang.get()
-
-            # table scenarios
-            scenario_uuid = str(uuid4())
-            uuid_list.append(scenario_uuid)
-            nsd_uuid_list.append(scenario_uuid)
-            db_scenario = {
-                "uuid": scenario_uuid,
-                "osm_id": get_str(nsd, "id", 255),
-                "name": get_str(nsd, "name", 255),
-                "description": get_str(nsd, "description", 255),
-                "tenant_id": tenant_id,
-                "vendor": get_str(nsd, "vendor", 255),
-                "short_name": get_str(nsd, "short-name", 255),
-                "descriptor": str(nsd_descriptor)[:60000],
-            }
-            db_scenarios.append(db_scenario)
-
-            # table sce_vnfs (constituent-vnfd)
-            vnf_index2scevnf_uuid = {}
-            vnf_index2vnf_uuid = {}
-            for vnf in nsd.get("constituent-vnfd").itervalues():
-                existing_vnf = mydb.get_rows(FROM="vnfs", WHERE={'osm_id': str(vnf["vnfd-id-ref"])[:255],
-                                                                      'tenant_id': tenant_id})
-                if not existing_vnf:
-                    raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'constituent-vnfd':'vnfd-id-ref':"
-                                        "'{}'. Reference to a non-existing VNFD in the catalog".format(
-                                            str(nsd["id"]), str(vnf["vnfd-id-ref"])[:255]),
-                                        httperrors.Bad_Request)
-                sce_vnf_uuid = str(uuid4())
-                uuid_list.append(sce_vnf_uuid)
-                db_sce_vnf = {
-                    "uuid": sce_vnf_uuid,
-                    "scenario_id": scenario_uuid,
-                    # "name": get_str(vnf, "member-vnf-index", 255),
-                    "name": existing_vnf[0]["name"][:200] + "." + get_str(vnf, "member-vnf-index", 50),
-                    "vnf_id": existing_vnf[0]["uuid"],
-                    "member_vnf_index": str(vnf["member-vnf-index"]),
-                    # TODO 'start-by-default': True
-                }
-                vnf_index2scevnf_uuid[str(vnf['member-vnf-index'])] = sce_vnf_uuid
-                vnf_index2vnf_uuid[str(vnf['member-vnf-index'])] = existing_vnf[0]["uuid"]
-                db_sce_vnfs.append(db_sce_vnf)
-
-            # table ip_profiles (ip-profiles)
-            ip_profile_name2db_table_index = {}
-            for ip_profile in nsd.get("ip-profiles").itervalues():
-                db_ip_profile = {
-                    "ip_version": str(ip_profile["ip-profile-params"].get("ip-version", "ipv4")),
-                    "subnet_address": str(ip_profile["ip-profile-params"].get("subnet-address")),
-                    "gateway_address": str(ip_profile["ip-profile-params"].get("gateway-address")),
-                    "dhcp_enabled": str(ip_profile["ip-profile-params"]["dhcp-params"].get("enabled", True)),
-                    "dhcp_start_address": str(ip_profile["ip-profile-params"]["dhcp-params"].get("start-address")),
-                    "dhcp_count": str(ip_profile["ip-profile-params"]["dhcp-params"].get("count")),
-                }
-                dns_list = []
-                for dns in ip_profile["ip-profile-params"]["dns-server"].itervalues():
-                    dns_list.append(str(dns.get("address")))
-                db_ip_profile["dns_address"] = ";".join(dns_list)
-                if ip_profile["ip-profile-params"].get('security-group'):
-                    db_ip_profile["security_group"] = ip_profile["ip-profile-params"]['security-group']
-                ip_profile_name2db_table_index[str(ip_profile["name"])] = db_ip_profiles_index
-                db_ip_profiles_index += 1
-                db_ip_profiles.append(db_ip_profile)
-
-            # table sce_nets (internal-vld)
-            for vld in nsd.get("vld").itervalues():
-                sce_net_uuid = str(uuid4())
-                uuid_list.append(sce_net_uuid)
-                db_sce_net = {
-                    "uuid": sce_net_uuid,
-                    "name": get_str(vld, "name", 255),
-                    "scenario_id": scenario_uuid,
-                    # "type": #TODO
-                    "multipoint": not vld.get("type") == "ELINE",
-                    "osm_id":  get_str(vld, "id", 255),
-                    # "external": #TODO
-                    "description": get_str(vld, "description", 255),
-                }
-                # guess type of network
-                if vld.get("mgmt-network"):
-                    db_sce_net["type"] = "bridge"
-                    db_sce_net["external"] = True
-                elif vld.get("provider-network").get("overlay-type") == "VLAN":
-                    db_sce_net["type"] = "data"
-                else:
-                    # later on it will be fixed to bridge or data depending on the type of interfaces attached to it
-                    db_sce_net["type"] = None
-                db_sce_nets.append(db_sce_net)
-
-                # ip-profile, link db_ip_profile with db_sce_net
-                if vld.get("ip-profile-ref"):
-                    ip_profile_name = vld.get("ip-profile-ref")
-                    if ip_profile_name not in ip_profile_name2db_table_index:
-                        raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'vld[{}]':'ip-profile-ref':'{}'."
-                                            " Reference to a non-existing 'ip_profiles'".format(
-                                                str(nsd["id"]), str(vld["id"]), str(vld["ip-profile-ref"])),
-                                            httperrors.Bad_Request)
-                    db_ip_profiles[ip_profile_name2db_table_index[ip_profile_name]]["sce_net_id"] = sce_net_uuid
-                elif vld.get("vim-network-name"):
-                    db_sce_net["vim_network_name"] = get_str(vld, "vim-network-name", 255)
-
-                # table sce_interfaces (vld:vnfd-connection-point-ref)
-                for iface in vld.get("vnfd-connection-point-ref").itervalues():
-                    vnf_index = str(iface['member-vnf-index-ref'])
-                    # check correct parameters
-                    if vnf_index not in vnf_index2vnf_uuid:
-                        raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'vld[{}]':'vnfd-connection-point"
-                                            "-ref':'member-vnf-index-ref':'{}'. Reference to a non-existing index at "
-                                            "'nsd':'constituent-vnfd'".format(
-                                                str(nsd["id"]), str(vld["id"]), str(iface["member-vnf-index-ref"])),
-                                            httperrors.Bad_Request)
-
-                    existing_ifaces = mydb.get_rows(SELECT=('i.uuid as uuid', 'i.type as iface_type'),
-                                                    FROM="interfaces as i join vms on i.vm_id=vms.uuid",
-                                                    WHERE={'vnf_id': vnf_index2vnf_uuid[vnf_index],
-                                                           'external_name': get_str(iface, "vnfd-connection-point-ref",
-                                                                                    255)})
-                    if not existing_ifaces:
-                        raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'vld[{}]':'vnfd-connection-point"
-                                            "-ref':'vnfd-connection-point-ref':'{}'. Reference to a non-existing "
-                                            "connection-point name at VNFD '{}'".format(
-                                                str(nsd["id"]), str(vld["id"]), str(iface["vnfd-connection-point-ref"]),
-                                                str(iface.get("vnfd-id-ref"))[:255]),
-                                            httperrors.Bad_Request)
-                    interface_uuid = existing_ifaces[0]["uuid"]
-                    if existing_ifaces[0]["iface_type"] == "data":
-                        db_sce_net["type"] = "data"
-                    sce_interface_uuid = str(uuid4())
-                    uuid_list.append(sce_net_uuid)
-                    iface_ip_address = None
-                    if iface.get("ip-address"):
-                        iface_ip_address = str(iface.get("ip-address"))
-                    db_sce_interface = {
-                        "uuid": sce_interface_uuid,
-                        "sce_vnf_id": vnf_index2scevnf_uuid[vnf_index],
-                        "sce_net_id": sce_net_uuid,
-                        "interface_id": interface_uuid,
-                        "ip_address": iface_ip_address,
-                    }
-                    db_sce_interfaces.append(db_sce_interface)
-                if not db_sce_net["type"]:
-                    db_sce_net["type"] = "bridge"
-
-            # table sce_vnffgs (vnffgd)
-            for vnffg in nsd.get("vnffgd").itervalues():
-                sce_vnffg_uuid = str(uuid4())
-                uuid_list.append(sce_vnffg_uuid)
-                db_sce_vnffg = {
-                    "uuid": sce_vnffg_uuid,
-                    "name": get_str(vnffg, "name", 255),
-                    "scenario_id": scenario_uuid,
-                    "vendor": get_str(vnffg, "vendor", 255),
-                    "description": get_str(vld, "description", 255),
-                }
-                db_sce_vnffgs.append(db_sce_vnffg)
-
-                # deal with rsps
-                for rsp in vnffg.get("rsp").itervalues():
-                    sce_rsp_uuid = str(uuid4())
-                    uuid_list.append(sce_rsp_uuid)
-                    db_sce_rsp = {
-                        "uuid": sce_rsp_uuid,
-                        "name": get_str(rsp, "name", 255),
-                        "sce_vnffg_id": sce_vnffg_uuid,
-                        "id": get_str(rsp, "id", 255), # only useful to link with classifiers; will be removed later in the code
-                    }
-                    db_sce_rsps.append(db_sce_rsp)
-                    for iface in rsp.get("vnfd-connection-point-ref").itervalues():
-                        vnf_index = str(iface['member-vnf-index-ref'])
-                        if_order = int(iface['order'])
-                        # check correct parameters
-                        if vnf_index not in vnf_index2vnf_uuid:
-                            raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'rsp[{}]':'vnfd-connection-point"
-                                                "-ref':'member-vnf-index-ref':'{}'. Reference to a non-existing index at "
-                                                "'nsd':'constituent-vnfd'".format(
-                                                    str(nsd["id"]), str(rsp["id"]), str(iface["member-vnf-index-ref"])),
-                                                httperrors.Bad_Request)
-
-                        ingress_existing_ifaces = mydb.get_rows(SELECT=('i.uuid as uuid',),
-                                                                FROM="interfaces as i join vms on i.vm_id=vms.uuid",
-                                                                WHERE={
-                                                                    'vnf_id': vnf_index2vnf_uuid[vnf_index],
-                                                                    'external_name': get_str(iface, "vnfd-ingress-connection-point-ref",
-                                                                                             255)})
-                        if not ingress_existing_ifaces:
-                            raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'rsp[{}]':'vnfd-connection-point"
-                                                "-ref':'vnfd-ingress-connection-point-ref':'{}'. Reference to a non-existing "
-                                                "connection-point name at VNFD '{}'".format(
-                                str(nsd["id"]), str(rsp["id"]), str(iface["vnfd-ingress-connection-point-ref"]),
-                                str(iface.get("vnfd-id-ref"))[:255]), httperrors.Bad_Request)
-
-                        egress_existing_ifaces = mydb.get_rows(SELECT=('i.uuid as uuid',),
-                                                               FROM="interfaces as i join vms on i.vm_id=vms.uuid",
-                                                               WHERE={
-                                                                   'vnf_id': vnf_index2vnf_uuid[vnf_index],
-                                                                   'external_name': get_str(iface, "vnfd-egress-connection-point-ref",
-                                                                                            255)})
-                        if not egress_existing_ifaces:
-                            raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'rsp[{}]':'vnfd-connection-point"
-                                                "-ref':'vnfd-egress-connection-point-ref':'{}'. Reference to a non-existing "
-                                                "connection-point name at VNFD '{}'".format(
-                                str(nsd["id"]), str(rsp["id"]), str(iface["vnfd-egress-connection-point-ref"]),
-                                str(iface.get("vnfd-id-ref"))[:255]), HTTP_Bad_Request)
-
-                        ingress_interface_uuid = ingress_existing_ifaces[0]["uuid"]
-                        egress_interface_uuid = egress_existing_ifaces[0]["uuid"]
-                        sce_rsp_hop_uuid = str(uuid4())
-                        uuid_list.append(sce_rsp_hop_uuid)
-                        db_sce_rsp_hop = {
-                            "uuid": sce_rsp_hop_uuid,
-                            "if_order": if_order,
-                            "ingress_interface_id": ingress_interface_uuid,
-                            "egress_interface_id": egress_interface_uuid,
-                            "sce_vnf_id": vnf_index2scevnf_uuid[vnf_index],
-                            "sce_rsp_id": sce_rsp_uuid,
-                        }
-                        db_sce_rsp_hops.append(db_sce_rsp_hop)
-
-                # deal with classifiers
-                for classifier in vnffg.get("classifier").itervalues():
-                    sce_classifier_uuid = str(uuid4())
-                    uuid_list.append(sce_classifier_uuid)
-
-                    # source VNF
-                    vnf_index = str(classifier['member-vnf-index-ref'])
-                    if vnf_index not in vnf_index2vnf_uuid:
-                        raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'classifier[{}]':'vnfd-connection-point"
-                                            "-ref':'member-vnf-index-ref':'{}'. Reference to a non-existing index at "
-                                            "'nsd':'constituent-vnfd'".format(
-                                                str(nsd["id"]), str(classifier["id"]), str(classifier["member-vnf-index-ref"])),
-                                            httperrors.Bad_Request)
-                    existing_ifaces = mydb.get_rows(SELECT=('i.uuid as uuid',),
-                                                    FROM="interfaces as i join vms on i.vm_id=vms.uuid",
-                                                    WHERE={'vnf_id': vnf_index2vnf_uuid[vnf_index],
-                                                           'external_name': get_str(classifier, "vnfd-connection-point-ref",
-                                                                                    255)})
-                    if not existing_ifaces:
-                        raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'rsp[{}]':'vnfd-connection-point"
-                                            "-ref':'vnfd-connection-point-ref':'{}'. Reference to a non-existing "
-                                            "connection-point name at VNFD '{}'".format(
-                                                str(nsd["id"]), str(rsp["id"]), str(iface["vnfd-connection-point-ref"]),
-                                                str(iface.get("vnfd-id-ref"))[:255]),
-                                            httperrors.Bad_Request)
-                    interface_uuid = existing_ifaces[0]["uuid"]
-
-                    db_sce_classifier = {
-                        "uuid": sce_classifier_uuid,
-                        "name": get_str(classifier, "name", 255),
-                        "sce_vnffg_id": sce_vnffg_uuid,
-                        "sce_vnf_id": vnf_index2scevnf_uuid[vnf_index],
-                        "interface_id": interface_uuid,
-                    }
-                    rsp_id = get_str(classifier, "rsp-id-ref", 255)
-                    rsp = next((item for item in db_sce_rsps if item["id"] == rsp_id), None)
-                    db_sce_classifier["sce_rsp_id"] = rsp["uuid"]
-                    db_sce_classifiers.append(db_sce_classifier)
-
-                    for match in classifier.get("match-attributes").itervalues():
-                        sce_classifier_match_uuid = str(uuid4())
-                        uuid_list.append(sce_classifier_match_uuid)
-                        db_sce_classifier_match = {
-                            "uuid": sce_classifier_match_uuid,
-                            "ip_proto": get_str(match, "ip-proto", 2),
-                            "source_ip": get_str(match, "source-ip-address", 16),
-                            "destination_ip": get_str(match, "destination-ip-address", 16),
-                            "source_port": get_str(match, "source-port", 5),
-                            "destination_port": get_str(match, "destination-port", 5),
-                            "sce_classifier_id": sce_classifier_uuid,
-                        }
-                        db_sce_classifier_matches.append(db_sce_classifier_match)
-                    # TODO: vnf/cp keys
-
-        # remove unneeded id's in sce_rsps
-        for rsp in db_sce_rsps:
-            rsp.pop('id')
-
-        db_tables = [
-            {"scenarios": db_scenarios},
-            {"sce_nets": db_sce_nets},
-            {"ip_profiles": db_ip_profiles},
-            {"sce_vnfs": db_sce_vnfs},
-            {"sce_interfaces": db_sce_interfaces},
-            {"sce_vnffgs": db_sce_vnffgs},
-            {"sce_rsps": db_sce_rsps},
-            {"sce_rsp_hops": db_sce_rsp_hops},
-            {"sce_classifiers": db_sce_classifiers},
-            {"sce_classifier_matches": db_sce_classifier_matches},
-        ]
-
-        logger.debug("new_nsd_v3 done: %s",
-                    yaml.safe_dump(db_tables, indent=4, default_flow_style=False) )
-        mydb.new_rows(db_tables, uuid_list)
-        return nsd_uuid_list
-    except NfvoException:
-        raise
-    except Exception as e:
-        logger.error("Exception {}".format(e))
-        raise  # NfvoException("Exception {}".format(e), httperrors.Bad_Request)
-
-
-def edit_scenario(mydb, tenant_id, scenario_id, data):
-    data["uuid"] = scenario_id
-    data["tenant_id"] = tenant_id
-    c = mydb.edit_scenario( data )
-    return c
-
-
@deprecated("Use create_instance")
def start_scenario(mydb, tenant_id, scenario_id, instance_scenario_name, instance_scenario_description, datacenter=None,
                   vim_tenant=None, startvms=True):
    """Deploy an instance of a scenario over a single VIM datacenter.

    DEPRECATED: kept only for backward compatibility; use create_instance instead.

    :param mydb: database connector
    :param tenant_id: nfvo tenant that owns the scenario
    :param scenario_id: uuid or name of the scenario to instantiate
    :param instance_scenario_name: name for the new instance (also used as prefix for nets/VMs)
    :param instance_scenario_description: description stored with the new instance
    :param datacenter: target datacenter name or uuid; tenant default when None
    :param vim_tenant: optional VIM tenant to use at the datacenter
    :param startvms: when False, VMs are created with start="no"
    :return: the created instance scenario as returned by mydb.get_instance_scenario
    :raises NfvoException: on database or VIM errors, after rolling back every item created at the VIM
    """
    # Check that nfvo_tenant_id exists and get the VIM URI and the VIM tenant_id
    datacenter_id, myvim = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter, vim_tenant=vim_tenant)
    vims = {datacenter_id: myvim}
    myvim_tenant = myvim['tenant_id']
    datacenter_name = myvim['name']

    rollbackList = []   # every net/vm created at the VIM; undone on failure
    try:
        # Check that the scenario_id exists and get the scenario dictionary
        scenarioDict = mydb.get_scenario(scenario_id, tenant_id, datacenter_id=datacenter_id)
        scenarioDict['datacenter2tenant'] = {datacenter_id: myvim['config']['datacenter_tenant_id']}
        scenarioDict['datacenter_id'] = datacenter_id

        logger.debug("start_scenario Scenario %s: consisting of %d VNF(s)", scenarioDict['name'],
                     len(scenarioDict['vnfs']))

        # Auxiliary dictionary. First key: 'scenario' or sce_vnf uuid. Second key: uuid of the net/sce_net.
        # Value: vim_net_id
        auxNetDict = {'scenario': {}}

        logger.debug("start_scenario 1. Creating new nets (sce_nets) in the VIM")
        for sce_net in scenarioDict['nets']:
            myNetName = "%s.%s" % (instance_scenario_name, sce_net['name'])
            myNetName = myNetName[0:255]    # limit length
            myNetType = sce_net['type']
            myNetDict = {}
            myNetDict["name"] = myNetName
            myNetDict["type"] = myNetType
            myNetDict["tenant_id"] = myvim_tenant
            myNetIPProfile = sce_net.get('ip_profile', None)
            # TODO: use the dictionary as input parameter for new_network
            if not sce_net["external"]:
                network_id, _ = myvim.new_network(myNetName, myNetType, myNetIPProfile)
                sce_net['vim_id'] = network_id
                auxNetDict['scenario'][sce_net['uuid']] = network_id
                rollbackList.append({'what': 'network', 'where': 'vim', 'vim_id': datacenter_id, 'uuid': network_id})
                sce_net["created"] = True
            else:
                # external nets must pre-exist at the datacenter
                if sce_net['vim_id'] is None:
                    error_text = "Error, datacenter '%s' does not have external network '%s'." % (
                        datacenter_name, sce_net['name'])
                    _, message = rollback(mydb, vims, rollbackList)
                    logger.error("nfvo.start_scenario: %s", error_text)
                    raise NfvoException(error_text, httperrors.Bad_Request)
                logger.debug("Using existent VIM network for scenario %s. Network id %s", scenarioDict['name'],
                             sce_net['vim_id'])
                auxNetDict['scenario'][sce_net['uuid']] = sce_net['vim_id']

        logger.debug("start_scenario 2. Creating new nets (vnf internal nets) in the VIM")
        # For each vnf internal net, create it and register it in auxNetDict
        for sce_vnf in scenarioDict['vnfs']:
            for net in sce_vnf['nets']:
                myNetName = "%s.%s" % (instance_scenario_name, net['name'])
                myNetName = myNetName[0:255]    # limit length
                myNetType = net['type']
                myNetDict = {}
                myNetDict["name"] = myNetName
                myNetDict["type"] = myNetType
                myNetDict["tenant_id"] = myvim_tenant
                myNetIPProfile = net.get('ip_profile', None)
                # TODO: use the dictionary as input parameter for new_network
                network_id, _ = myvim.new_network(myNetName, myNetType, myNetIPProfile)
                net['vim_id'] = network_id
                if sce_vnf['uuid'] not in auxNetDict:
                    auxNetDict[sce_vnf['uuid']] = {}
                auxNetDict[sce_vnf['uuid']][net['uuid']] = network_id
                rollbackList.append({'what': 'network', 'where': 'vim', 'vim_id': datacenter_id, 'uuid': network_id})
                net["created"] = True

        logger.debug("start_scenario 3. Creating new vm instances in the VIM")
        i = 0
        for sce_vnf in scenarioDict['vnfs']:
            # collect the availability zones requested by the VMs of this VNF
            vnf_availability_zones = []
            for vm in sce_vnf['vms']:
                vm_av = vm.get('availability_zone')
                if vm_av and vm_av not in vnf_availability_zones:
                    vnf_availability_zones.append(vm_av)

            # check if there are enough availability zones available at vim level.
            # BUGFIX: the original referenced an undefined 'myvims' dict here (NameError at runtime);
            # 'myvim' is the connector for the single datacenter used by this deprecated entry point
            if myvim.availability_zone and vnf_availability_zones:
                if len(vnf_availability_zones) > len(myvim.availability_zone):
                    raise NfvoException('No enough availability zones at VIM for this deployment',
                                        httperrors.Bad_Request)

            for vm in sce_vnf['vms']:
                i += 1
                myVMDict = {}
                # name each VM as <instance>.<vnf>.<a,b,c...> (letter per VM)
                myVMDict['name'] = "{}.{}.{}".format(instance_scenario_name, sce_vnf['name'], chr(96 + i))
                myVMDict['description'] = myVMDict['name'][0:99]
                if not startvms:
                    myVMDict['start'] = "no"
                myVMDict['name'] = myVMDict['name'][0:255]  # limit name length

                # create image at vim in case it does not exist
                image_dict = mydb.get_table_by_uuid_name("images", vm['image_id'])
                image_id = create_or_use_image(mydb, vims, image_dict, [], True)
                vm['vim_image_id'] = image_id

                # create flavor at vim in case it does not exist
                flavor_dict = mydb.get_table_by_uuid_name("flavors", vm['flavor_id'])
                if flavor_dict['extended'] is not None:
                    # stored as yaml text generated by this code; safe_load avoids arbitrary object construction
                    flavor_dict['extended'] = yaml.safe_load(flavor_dict['extended'])
                flavor_id = create_or_use_flavor(mydb, vims, flavor_dict, [], True)
                vm['vim_flavor_id'] = flavor_id

                myVMDict['imageRef'] = vm['vim_image_id']
                myVMDict['flavorRef'] = vm['vim_flavor_id']
                myVMDict['networks'] = []
                for iface in vm['interfaces']:
                    netDict = {}
                    if iface['type'] == "data":
                        netDict['type'] = iface['model']
                    elif "model" in iface and iface["model"] is not None:
                        netDict['model'] = iface['model']
                    # TODO in future, remove this because mac_address will not be set, and the type of PV,VF
                    # is obtained from interface table model
                    # discover type of interface looking at flavor
                    for numa in flavor_dict.get('extended', {}).get('numas', []):
                        for flavor_iface in numa.get('interfaces', []):
                            if flavor_iface.get('name') == iface['internal_name']:
                                if flavor_iface['dedicated'] == 'yes':
                                    netDict['type'] = "PF"      # passthrough
                                elif flavor_iface['dedicated'] == 'no':
                                    netDict['type'] = "VF"      # sriov
                                elif flavor_iface['dedicated'] == 'yes:sriov':
                                    netDict['type'] = "VFnotShared"     # sriov but only one sriov on the PF
                                netDict["mac_address"] = flavor_iface.get("mac_address")
                                break
                    netDict["use"] = iface['type']
                    if netDict["use"] == "data" and not netDict.get("type"):
                        e_text = "Cannot determine the interface type PF or VF of VNF '%s' VM '%s' iface '%s'" % (
                            sce_vnf['name'], vm['name'], iface['internal_name'])
                        if flavor_dict.get('extended') is None:
                            raise NfvoException(e_text  + "After database migration some information is not available. \
                                    Try to delete and create the scenarios and VNFs again", httperrors.Conflict)
                        else:
                            raise NfvoException(e_text, httperrors.Internal_Server_Error)
                    if netDict["use"] == "mgmt" or netDict["use"] == "bridge":
                        netDict["type"] = "virtual"
                    if "vpci" in iface and iface["vpci"] is not None:
                        netDict['vpci'] = iface['vpci']
                    if "mac" in iface and iface["mac"] is not None:
                        netDict['mac_address'] = iface['mac']
                    if "port-security" in iface and iface["port-security"] is not None:
                        netDict['port_security'] = iface['port-security']
                    if "floating-ip" in iface and iface["floating-ip"] is not None:
                        netDict['floating_ip'] = iface['floating-ip']
                    netDict['name'] = iface['internal_name']
                    if iface['net_id'] is None:
                        # interface attached to a scenario-level net: resolve through the vnf external interfaces
                        for vnf_iface in sce_vnf["interfaces"]:
                            if vnf_iface['interface_id'] == iface['uuid']:
                                netDict['net_id'] = auxNetDict['scenario'][vnf_iface['sce_net_id']]
                                break
                    else:
                        netDict['net_id'] = auxNetDict[sce_vnf['uuid']][iface['net_id']]
                    myVMDict['networks'].append(netDict)

                if 'availability_zone' in myVMDict:
                    av_index = vnf_availability_zones.index(myVMDict['availability_zone'])
                else:
                    av_index = None

                vm_id, _ = myvim.new_vminstance(myVMDict['name'], myVMDict['description'],
                                                myVMDict.get('start', None), myVMDict['imageRef'],
                                                myVMDict['flavorRef'], myVMDict['networks'],
                                                availability_zone_index=av_index,
                                                availability_zone_list=vnf_availability_zones)
                vm['vim_id'] = vm_id
                rollbackList.append({'what': 'vm', 'where': 'vim', 'vim_id': datacenter_id, 'uuid': vm_id})
                # put interface uuid back to scenario[vnfs][vms][interfaces]
                for net in myVMDict['networks']:
                    if "vim_id" in net:
                        for iface in vm['interfaces']:
                            if net["name"] == iface["internal_name"]:
                                iface["vim_id"] = net["vim_id"]
                                break

        logger.debug("start scenario Deployment done")
        instance_id = mydb.new_instance_scenario_as_a_whole(tenant_id, instance_scenario_name,
                                                            instance_scenario_description, scenarioDict)
        return mydb.get_instance_scenario(instance_id)

    except (db_base_Exception, vimconn.vimconnException) as e:
        # undo everything created at the VIM before propagating the error
        _, message = rollback(mydb, vims, rollbackList)
        if isinstance(e, db_base_Exception):
            error_text = "Exception at database"
        else:
            error_text = "Exception at VIM"
        error_text += " {} {}. {}".format(type(e).__name__, str(e), message)
        raise NfvoException(error_text, e.http_code)
-
def unify_cloud_config(cloud_config_preserve, cloud_config):
    """Merge two cloud-config descriptors into a single one.

    Entries from cloud_config_preserve win on conflicts. Either argument
    (or both) may be None/empty; None is returned when both are empty.
    Empty sections are removed from the result.
    """
    if not cloud_config_preserve and not cloud_config:
        return None

    merged = {"key-pairs": [], "users": []}

    # key-pairs: keep each public key once; preserve-side keys come first
    for source in (cloud_config_preserve, cloud_config):
        if not source:
            continue
        for key in source.get("key-pairs", ()):
            if key not in merged["key-pairs"]:
                merged["key-pairs"].append(key)
    if not merged["key-pairs"]:
        del merged["key-pairs"]

    # users: plain concatenation (cloud_config side first), then de-duplicate by name,
    # folding the key-pairs of any later duplicate into the first occurrence
    if cloud_config:
        merged["users"] += cloud_config.get("users", ())
    if cloud_config_preserve:
        merged["users"] += cloud_config_preserve.get("users", ())
    users = merged.get("users", [])
    drop = []
    for first in range(len(users)):
        if first in drop:
            continue
        for second in range(first + 1, len(users)):
            if second in drop:
                continue
            if users[first]["name"] != users[second]["name"]:
                continue
            drop.append(second)
            for key in users[second].get("key-pairs", ()):
                if "key-pairs" not in users[first]:
                    users[first]["key-pairs"] = [key]
                elif key not in users[first]["key-pairs"]:
                    users[first]["key-pairs"].append(key)
    for index in sorted(drop, reverse=True):
        del users[index]
    if not merged["users"]:
        del merged["users"]

    # boot-data-drive: a value on the preserve side overrides the other one
    for source in (cloud_config, cloud_config_preserve):
        if source and source.get("boot-data-drive") is not None:
            merged["boot-data-drive"] = source["boot-data-drive"]

    # user-data: concatenate, normalizing scalars into single-item lists
    merged["user-data"] = []
    for source in (cloud_config, cloud_config_preserve):
        if not source or not source.get("user-data"):
            continue
        if isinstance(source["user-data"], list):
            merged["user-data"] += source["user-data"]
        else:
            merged["user-data"].append(source["user-data"])
    if not merged["user-data"]:
        del merged["user-data"]

    # config files: preserve-side files replace same-destination files, otherwise append
    merged["config-files"] = []
    if cloud_config and cloud_config.get("config-files") is not None:
        merged["config-files"] += cloud_config["config-files"]
    if cloud_config_preserve:
        for cfg_file in cloud_config_preserve.get("config-files", ()):
            for index in range(len(merged["config-files"])):
                if merged["config-files"][index]["dest"] == cfg_file["dest"]:
                    merged["config-files"][index] = cfg_file
                    break
            else:
                merged["config-files"].append(cfg_file)
    if not merged["config-files"]:
        del merged["config-files"]

    return merged
-
-
def get_vim_thread(mydb, tenant_id, datacenter_id_name=None, datacenter_tenant_id=None):
    """Locate the running vim thread serving a datacenter of a tenant.

    :param mydb: database connector
    :param tenant_id: nfvo tenant owning the vim account
    :param datacenter_id_name: datacenter uuid or name; used when datacenter_tenant_id is not supplied
    :param datacenter_tenant_id: vim account (datacenter_tenant) uuid; takes precedence when supplied
    :return: tuple (thread_id, thread) with the vim account uuid and its running thread
    :raises NfvoException: when the datacenter is not found, is ambiguous, or has no running thread
    """
    datacenter_id = None
    datacenter_name = None
    thread = None
    try:
        if datacenter_tenant_id:
            # direct lookup by vim account uuid
            thread_id = datacenter_tenant_id
            thread = vim_threads["running"].get(datacenter_tenant_id)
        else:
            # resolve the vim account through the tenant/datacenter association tables
            where_ = {"td.nfvo_tenant_id": tenant_id}
            if datacenter_id_name:
                if utils.check_valid_uuid(datacenter_id_name):
                    datacenter_id = datacenter_id_name
                    where_["dt.datacenter_id"] = datacenter_id
                else:
                    datacenter_name = datacenter_id_name
                    where_["d.name"] = datacenter_name
            # NOTE: a dead `if datacenter_tenant_id:` filter was removed here; this branch
            # is only reached when datacenter_tenant_id is falsy, so it could never apply
            datacenters = mydb.get_rows(
                SELECT=("dt.uuid as datacenter_tenant_id",),
                FROM="datacenter_tenants as dt join tenants_datacenters as td on dt.uuid=td.datacenter_tenant_id "
                     "join datacenters as d on d.uuid=dt.datacenter_id",
                WHERE=where_)
            if len(datacenters) > 1:
                raise NfvoException("More than one datacenters found, try to identify with uuid", httperrors.Conflict)
            elif datacenters:
                thread_id = datacenters[0]["datacenter_tenant_id"]
                thread = vim_threads["running"].get(thread_id)
        if not thread:
            raise NfvoException("datacenter '{}' not found".format(str(datacenter_id_name)), httperrors.Not_Found)
        return thread_id, thread
    except db_base_Exception as e:
        raise NfvoException("{} {}".format(type(e).__name__, str(e)), e.http_code)
-
-
def get_datacenter_uuid(mydb, tenant_id, datacenter_id_name):
    """Resolve a datacenter name or uuid into its (uuid, name) pair.

    When tenant_id is provided, only datacenters attached to that tenant are
    considered; otherwise all datacenters are searched.

    :raises NfvoException: when no datacenter, or more than one, matches
    """
    filter_ = {}
    if utils.check_valid_uuid(datacenter_id_name):
        filter_['d.uuid'] = datacenter_id_name
    else:
        filter_['d.name'] = datacenter_id_name

    if tenant_id:
        # restrict the search to the datacenters attached to this tenant
        filter_['nfvo_tenant_id'] = tenant_id
        from_ = "tenants_datacenters as td join datacenters as d on td.datacenter_id=d.uuid join datacenter_tenants as" \
                " dt on td.datacenter_tenant_id=dt.uuid"
    else:
        from_ = 'datacenters as d'

    vimaccounts = mydb.get_rows(FROM=from_, SELECT=("d.uuid as uuid, d.name as name",), WHERE=filter_)
    if not vimaccounts:
        raise NfvoException("datacenter '{}' not found".format(str(datacenter_id_name)), httperrors.Not_Found)
    if len(vimaccounts) > 1:
        raise NfvoException("More than one datacenters found, try to identify with uuid", httperrors.Conflict)
    return vimaccounts[0]["uuid"], vimaccounts[0]["name"]
-
-
def get_datacenter_by_name_uuid(mydb, tenant_id, datacenter_id_name=None, **extra_filter):
    """Return (datacenter_id, vim_connector) for the datacenter matching a name or uuid.

    :param mydb: database connector
    :param tenant_id: nfvo tenant to which the datacenter must be attached
    :param datacenter_id_name: datacenter uuid or name; when None the single attached datacenter is used
    :param extra_filter: extra filtering parameters forwarded to get_vim (e.g. vim_tenant)
    :return: tuple (datacenter_id, vim_connector)
    :raises NfvoException: when no datacenter, or more than one, matches
    """
    datacenter_id = None
    datacenter_name = None
    if datacenter_id_name:
        if utils.check_valid_uuid(datacenter_id_name):
            datacenter_id = datacenter_id_name
        else:
            datacenter_name = datacenter_id_name
    vims = get_vim(mydb, tenant_id, datacenter_id, datacenter_name, **extra_filter)
    if len(vims) == 0:
        raise NfvoException("datacenter '{}' not found".format(str(datacenter_id_name)), httperrors.Not_Found)
    elif len(vims) > 1:
        raise NfvoException("More than one datacenters found, try to identify with uuid", httperrors.Conflict)
    # BUGFIX python3: dict views are not subscriptable (vims.keys()[0] raised TypeError);
    # take the single (key, value) pair from the view instead
    return next(iter(vims.items()))
-
-
def update(d, u):
    """Recursively merge dict u into dict d and return d.

    All depth levels are merged: values from u win, except that nested
    mappings are merged key-by-key instead of being replaced wholesale.

    :param d: destination dict (mutated in place)
    :param u: source dict whose entries are merged into d
    :return: d, after merging
    """
    # local import keeps this fix self-contained; collections.Mapping was removed in python 3.10
    from collections.abc import Mapping

    # BUGFIX python3: iteritems() no longer exists; items() is the py3 equivalent
    for k, v in u.items():
        if isinstance(v, Mapping):
            d[k] = update(d.get(k, {}), v)
        else:
            d[k] = v
    return d
-
-
-def create_instance(mydb, tenant_id, instance_dict):
-    # print "Checking that nfvo_tenant_id exists and getting the VIM URI and the VIM tenant_id"
-    # logger.debug("Creating instance...")
-    scenario = instance_dict["scenario"]
-
-    # find main datacenter
-    myvims = {}
-    myvim_threads_id = {}
-    datacenter = instance_dict.get("datacenter")
-    default_wim_account = instance_dict.get("wim_account")
-    default_datacenter_id, vim = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)
-    myvims[default_datacenter_id] = vim
-    myvim_threads_id[default_datacenter_id], _ = get_vim_thread(mydb, tenant_id, default_datacenter_id)
-    tenant = mydb.get_rows_by_id('nfvo_tenants', tenant_id)
-    # myvim_tenant = myvim['tenant_id']
-    rollbackList = []
-
-    # print "Checking that the scenario exists and getting the scenario dictionary"
-    if isinstance(scenario, str):
-        scenarioDict = mydb.get_scenario(scenario, tenant_id, datacenter_vim_id=myvim_threads_id[default_datacenter_id],
-                                         datacenter_id=default_datacenter_id)
-    else:
-        scenarioDict = scenario
-        scenarioDict["uuid"] = None
-
-    # logger.debug(">>>>>> Dictionaries before merging")
-    # logger.debug(">>>>>> InstanceDict:\n{}".format(yaml.safe_dump(instance_dict,default_flow_style=False, width=256)))
-    # logger.debug(">>>>>> ScenarioDict:\n{}".format(yaml.safe_dump(scenarioDict,default_flow_style=False, width=256)))
-
-    db_instance_vnfs = []
-    db_instance_vms = []
-    db_instance_interfaces = []
-    db_instance_sfis = []
-    db_instance_sfs = []
-    db_instance_classifications = []
-    db_instance_sfps = []
-    db_ip_profiles = []
-    db_vim_actions = []
-    uuid_list = []
-    task_index = 0
-    instance_name = instance_dict["name"]
-    instance_uuid = str(uuid4())
-    uuid_list.append(instance_uuid)
-    db_instance_scenario = {
-        "uuid": instance_uuid,
-        "name": instance_name,
-        "tenant_id": tenant_id,
-        "scenario_id": scenarioDict['uuid'],
-        "datacenter_id": default_datacenter_id,
-        # filled bellow 'datacenter_tenant_id'
-        "description": instance_dict.get("description"),
-    }
-    if scenarioDict.get("cloud-config"):
-        db_instance_scenario["cloud_config"] = yaml.safe_dump(scenarioDict["cloud-config"],
-                                                              default_flow_style=True, width=256)
-    instance_action_id = get_task_id()
-    db_instance_action = {
-        "uuid": instance_action_id,   # same uuid for the instance and the action on create
-        "tenant_id": tenant_id,
-        "instance_id": instance_uuid,
-        "description": "CREATE",
-    }
-
-    # Auxiliary dictionaries from x to y
-    sce_net2instance = {}
-    net2task_id = {'scenario': {}}
-    # Mapping between local networks and WIMs
-    wim_usage = {}
-
-    def ip_profile_IM2RO(ip_profile_im):
-        # translate from input format to database format
-        ip_profile_ro = {}
-        if 'subnet-address' in ip_profile_im:
-            ip_profile_ro['subnet_address'] = ip_profile_im['subnet-address']
-        if 'ip-version' in ip_profile_im:
-            ip_profile_ro['ip_version'] = ip_profile_im['ip-version']
-        if 'gateway-address' in ip_profile_im:
-            ip_profile_ro['gateway_address'] = ip_profile_im['gateway-address']
-        if 'dns-address' in ip_profile_im:
-            ip_profile_ro['dns_address'] = ip_profile_im['dns-address']
-            if isinstance(ip_profile_ro['dns_address'], (list, tuple)):
-                ip_profile_ro['dns_address'] = ";".join(ip_profile_ro['dns_address'])
-        if 'dhcp' in ip_profile_im:
-            ip_profile_ro['dhcp_start_address'] = ip_profile_im['dhcp'].get('start-address')
-            ip_profile_ro['dhcp_enabled'] = ip_profile_im['dhcp'].get('enabled', True)
-            ip_profile_ro['dhcp_count'] = ip_profile_im['dhcp'].get('count')
-        return ip_profile_ro
-
-    # logger.debug("Creating instance from scenario-dict:\n%s",
-    #               yaml.safe_dump(scenarioDict, indent=4, default_flow_style=False))
-    try:
-        # 0 check correct parameters
-        for net_name, net_instance_desc in instance_dict.get("networks", {}).iteritems():
-            for scenario_net in scenarioDict['nets']:
-                if net_name == scenario_net.get("name") or net_name == scenario_net.get("osm_id") or net_name == scenario_net.get("uuid"):
-                    break
-            else:
-                raise NfvoException("Invalid scenario network name or id '{}' at instance:networks".format(net_name),
-                                    httperrors.Bad_Request)
-            if "sites" not in net_instance_desc:
-                net_instance_desc["sites"] = [ {} ]
-            site_without_datacenter_field = False
-            for site in net_instance_desc["sites"]:
-                if site.get("datacenter"):
-                    site["datacenter"], _ = get_datacenter_uuid(mydb, tenant_id, site["datacenter"])
-                    if site["datacenter"] not in myvims:
-                        # Add this datacenter to myvims
-                        d, v = get_datacenter_by_name_uuid(mydb, tenant_id, site["datacenter"])
-                        myvims[d] = v
-                        myvim_threads_id[d], _ = get_vim_thread(mydb, tenant_id, site["datacenter"])
-                        site["datacenter"] = d  # change name to id
-                else:
-                    if site_without_datacenter_field:
-                        raise NfvoException("Found more than one entries without datacenter field at "
-                                            "instance:networks:{}:sites".format(net_name), httperrors.Bad_Request)
-                    site_without_datacenter_field = True
-                    site["datacenter"] = default_datacenter_id   # change name to id
-
-        for vnf_name, vnf_instance_desc in instance_dict.get("vnfs",{}).iteritems():
-            for scenario_vnf in scenarioDict['vnfs']:
-                if vnf_name == scenario_vnf['member_vnf_index'] or vnf_name == scenario_vnf['uuid'] or vnf_name == scenario_vnf['name']:
-                    break
-            else:
-                raise NfvoException("Invalid vnf name '{}' at instance:vnfs".format(vnf_name), httperrors.Bad_Request)
-            if "datacenter" in vnf_instance_desc:
-                # Add this datacenter to myvims
-                vnf_instance_desc["datacenter"], _ = get_datacenter_uuid(mydb, tenant_id, vnf_instance_desc["datacenter"])
-                if vnf_instance_desc["datacenter"] not in myvims:
-                    d, v = get_datacenter_by_name_uuid(mydb, tenant_id, vnf_instance_desc["datacenter"])
-                    myvims[d] = v
-                    myvim_threads_id[d], _ = get_vim_thread(mydb, tenant_id, vnf_instance_desc["datacenter"])
-                scenario_vnf["datacenter"] = vnf_instance_desc["datacenter"]
-
-            for net_id, net_instance_desc in vnf_instance_desc.get("networks", {}).iteritems():
-                for scenario_net in scenario_vnf['nets']:
-                    if net_id == scenario_net['osm_id'] or net_id == scenario_net['uuid'] or net_id == scenario_net["name"]:
-                        break
-                else:
-                    raise NfvoException("Invalid net id or name '{}' at instance:vnfs:networks".format(net_id), httperrors.Bad_Request)
-                if net_instance_desc.get("vim-network-name"):
-                    scenario_net["vim-network-name"] = net_instance_desc["vim-network-name"]
-                if net_instance_desc.get("vim-network-id"):
-                    scenario_net["vim-network-id"] = net_instance_desc["vim-network-id"]
-                if net_instance_desc.get("name"):
-                    scenario_net["name"] = net_instance_desc["name"]
-                if 'ip-profile' in net_instance_desc:
-                    ipprofile_db = ip_profile_IM2RO(net_instance_desc['ip-profile'])
-                    if 'ip_profile' not in scenario_net:
-                        scenario_net['ip_profile'] = ipprofile_db
-                    else:
-                        update(scenario_net['ip_profile'], ipprofile_db)
-
-            for vdu_id, vdu_instance_desc in vnf_instance_desc.get("vdus", {}).iteritems():
-                for scenario_vm in scenario_vnf['vms']:
-                    if vdu_id == scenario_vm['osm_id'] or vdu_id == scenario_vm["name"]:
-                        break
-                else:
-                    raise NfvoException("Invalid vdu id or name '{}' at instance:vnfs:vdus".format(vdu_id), httperrors.Bad_Request)
-                scenario_vm["instance_parameters"] = vdu_instance_desc
-                for iface_id, iface_instance_desc in vdu_instance_desc.get("interfaces", {}).iteritems():
-                    for scenario_interface in scenario_vm['interfaces']:
-                        if iface_id == scenario_interface['internal_name'] or iface_id == scenario_interface["external_name"]:
-                            scenario_interface.update(iface_instance_desc)
-                            break
-                    else:
-                        raise NfvoException("Invalid vdu id or name '{}' at instance:vnfs:vdus".format(vdu_id), httperrors.Bad_Request)
-
-        # 0.1 parse cloud-config parameters
-        cloud_config = unify_cloud_config(instance_dict.get("cloud-config"), scenarioDict.get("cloud-config"))
-
-        # 0.2 merge instance information into scenario
-        # Ideally, the operation should be as simple as: update(scenarioDict,instance_dict)
-        # However, this is not possible yet.
-        for net_name, net_instance_desc in instance_dict.get("networks", {}).iteritems():
-            for scenario_net in scenarioDict['nets']:
-                if net_name == scenario_net.get("name") or net_name == scenario_net.get("osm_id") or net_name == scenario_net.get("uuid"):
-                    if "wim_account" in net_instance_desc and net_instance_desc["wim_account"] is not None:
-                        scenario_net["wim_account"] = net_instance_desc["wim_account"]
-                    if 'ip-profile' in net_instance_desc:
-                        ipprofile_db = ip_profile_IM2RO(net_instance_desc['ip-profile'])
-                        if 'ip_profile' not in scenario_net:
-                            scenario_net['ip_profile'] = ipprofile_db
-                        else:
-                            update(scenario_net['ip_profile'], ipprofile_db)
-            for interface in net_instance_desc.get('interfaces', ()):
-                if 'ip_address' in interface:
-                    for vnf in scenarioDict['vnfs']:
-                        if interface['vnf'] == vnf['name']:
-                            for vnf_interface in vnf['interfaces']:
-                                if interface['vnf_interface'] == vnf_interface['external_name']:
-                                    vnf_interface['ip_address'] = interface['ip_address']
-
-        # logger.debug(">>>>>>>> Merged dictionary")
-        # logger.debug("Creating instance scenario-dict MERGED:\n%s",
-        #              yaml.safe_dump(scenarioDict, indent=4, default_flow_style=False))
-
-        # 1. Creating new nets (sce_nets) in the VIM"
-        number_mgmt_networks = 0
-        db_instance_nets = []
-        for sce_net in scenarioDict['nets']:
-            sce_net_uuid = sce_net.get('uuid', sce_net["name"])
-            # get involved datacenters where this network need to be created
-            involved_datacenters = []
-            for sce_vnf in scenarioDict.get("vnfs", ()):
-                vnf_datacenter = sce_vnf.get("datacenter", default_datacenter_id)
-                if vnf_datacenter in involved_datacenters:
-                    continue
-                if sce_vnf.get("interfaces"):
-                    for sce_vnf_ifaces in sce_vnf["interfaces"]:
-                        if sce_vnf_ifaces.get("sce_net_id") == sce_net["uuid"]:
-                            involved_datacenters.append(vnf_datacenter)
-                            break
-            if not involved_datacenters:
-                involved_datacenters.append(default_datacenter_id)
-            target_wim_account = sce_net.get("wim_account", default_wim_account)
-
-            # --> WIM
-            # TODO: use this information during network creation
-            wim_account_id = wim_account_name = None
-            if len(involved_datacenters) > 1 and 'uuid' in sce_net:
-                if target_wim_account is None or target_wim_account is True:  # automatic selection of WIM
-                    # OBS: sce_net without uuid are used internally to VNFs
-                    # and the assumption is that VNFs will not be split among
-                    # different datacenters
-                    wim_account = wim_engine.find_suitable_wim_account(
-                        involved_datacenters, tenant_id)
-                    wim_account_id = wim_account['uuid']
-                    wim_account_name = wim_account['name']
-                    wim_usage[sce_net['uuid']] = wim_account_id
-                elif isinstance(target_wim_account, str):     # manual selection of WIM
-                    wim_account.persist.get_wim_account_by(target_wim_account, tenant_id)
-                    wim_account_id = wim_account['uuid']
-                    wim_account_name = wim_account['name']
-                    wim_usage[sce_net['uuid']] = wim_account_id
-                else:  # not WIM usage
-                    wim_usage[sce_net['uuid']] = False
-            # <-- WIM
-
-            descriptor_net = {}
-            if instance_dict.get("networks"):
-                if sce_net.get("uuid") in instance_dict["networks"]:
-                    descriptor_net = instance_dict["networks"][sce_net["uuid"]]
-                    descriptor_net_name = sce_net["uuid"]
-                elif sce_net.get("osm_id") in instance_dict["networks"]:
-                    descriptor_net = instance_dict["networks"][sce_net["osm_id"]]
-                    descriptor_net_name = sce_net["osm_id"]
-                elif sce_net["name"] in instance_dict["networks"]:
-                    descriptor_net = instance_dict["networks"][sce_net["name"]]
-                    descriptor_net_name = sce_net["name"]
-            net_name = descriptor_net.get("vim-network-name")
-            # add datacenters from instantiation parameters
-            if descriptor_net.get("sites"):
-                for site in descriptor_net["sites"]:
-                    if site.get("datacenter") and site["datacenter"] not in involved_datacenters:
-                        involved_datacenters.append(site["datacenter"])
-            sce_net2instance[sce_net_uuid] = {}
-            net2task_id['scenario'][sce_net_uuid] = {}
-
-            use_network = None
-            related_network = None
-            if descriptor_net.get("use-network"):
-                target_instance_nets = mydb.get_rows(
-                    SELECT="related",
-                    FROM="instance_nets",
-                    WHERE={"instance_scenario_id": descriptor_net["use-network"]["instance_scenario_id"],
-                           "osm_id":  descriptor_net["use-network"]["osm_id"]},
-                )
-                if not target_instance_nets:
-                    raise NfvoException(
-                        "Cannot find the target network at instance:networks[{}]:use-network".format(descriptor_net_name),
-                        httperrors.Bad_Request)
-                else:
-                    use_network = target_instance_nets[0]["related"]
-
-            if sce_net["external"]:
-                number_mgmt_networks += 1
-
-            for datacenter_id in involved_datacenters:
-                netmap_use = None
-                netmap_create = None
-                if descriptor_net.get("sites"):
-                    for site in descriptor_net["sites"]:
-                        if site.get("datacenter") == datacenter_id:
-                            netmap_use = site.get("netmap-use")
-                            netmap_create = site.get("netmap-create")
-                            break
-
-                vim = myvims[datacenter_id]
-                myvim_thread_id = myvim_threads_id[datacenter_id]
-
-                net_type = sce_net['type']
-                net_vim_name = None
-                lookfor_filter = {'admin_state_up': True, 'status': 'ACTIVE'}  # 'shared': True
-
-                if not net_name:
-                    if sce_net["external"]:
-                        net_name = sce_net["name"]
-                    else:
-                        net_name = "{}-{}".format(instance_name, sce_net["name"])
-                        net_name = net_name[:255]     # limit length
-
-                if netmap_use or netmap_create:
-                    create_network = False
-                    lookfor_network = False
-                    if netmap_use:
-                        lookfor_network = True
-                        if utils.check_valid_uuid(netmap_use):
-                            lookfor_filter["id"] = netmap_use
-                        else:
-                            lookfor_filter["name"] = netmap_use
-                    if netmap_create:
-                        create_network = True
-                        net_vim_name = net_name
-                        if isinstance(netmap_create, str):
-                            net_vim_name = netmap_create
-                elif sce_net.get("vim_network_name"):
-                    create_network = False
-                    lookfor_network = True
-                    lookfor_filter["name"] = sce_net.get("vim_network_name")
-                elif sce_net["external"]:
-                    if sce_net.get('vim_id'):
-                        # there is a netmap at datacenter_nets database   # TODO REVISE!!!!
-                        create_network = False
-                        lookfor_network = True
-                        lookfor_filter["id"] = sce_net['vim_id']
-                    elif vim["config"].get("management_network_id") or vim["config"].get("management_network_name"):
-                        if number_mgmt_networks > 1:
-                            raise NfvoException("Found several VLD of type mgmt. "
-                                                "You must concrete what vim-network must be use for each one",
-                                                httperrors.Bad_Request)
-                        create_network = False
-                        lookfor_network = True
-                        if vim["config"].get("management_network_id"):
-                            lookfor_filter["id"] = vim["config"]["management_network_id"]
-                        else:
-                            lookfor_filter["name"] = vim["config"]["management_network_name"]
-                    else:
-                        # There is not a netmap, look at datacenter for a net with this name and create if not found
-                        create_network = True
-                        lookfor_network = True
-                        lookfor_filter["name"] = sce_net["name"]
-                        net_vim_name = sce_net["name"]
-                else:
-                    net_vim_name = net_name
-                    create_network = True
-                    lookfor_network = False
-
-                task_extra = {}
-                if create_network:
-                    task_action = "CREATE"
-                    task_extra["params"] = (net_vim_name, net_type, sce_net.get('ip_profile', None), wim_account_name)
-                    if lookfor_network:
-                        task_extra["find"] = (lookfor_filter,)
-                elif lookfor_network:
-                    task_action = "FIND"
-                    task_extra["params"] = (lookfor_filter,)
-
-                # fill database content
-                net_uuid = str(uuid4())
-                uuid_list.append(net_uuid)
-                sce_net2instance[sce_net_uuid][datacenter_id] = net_uuid
-                if not related_network:   # all db_instance_nets will have same related
-                    related_network = use_network or net_uuid
-                db_net = {
-                    "uuid": net_uuid,
-                    "osm_id": sce_net.get("osm_id") or sce_net["name"],
-                    "related": related_network,
-                    'vim_net_id': None,
-                    "vim_name": net_vim_name,
-                    "instance_scenario_id": instance_uuid,
-                    "sce_net_id": sce_net.get("uuid"),
-                    "created": create_network,
-                    'datacenter_id': datacenter_id,
-                    'datacenter_tenant_id': myvim_thread_id,
-                    'status': 'BUILD' #  if create_network else "ACTIVE"
-                }
-                db_instance_nets.append(db_net)
-                db_vim_action = {
-                    "instance_action_id": instance_action_id,
-                    "status": "SCHEDULED",
-                    "task_index": task_index,
-                    "datacenter_vim_id": myvim_thread_id,
-                    "action": task_action,
-                    "item": "instance_nets",
-                    "item_id": net_uuid,
-                    "related": related_network,
-                    "extra": yaml.safe_dump(task_extra, default_flow_style=True, width=256)
-                }
-                net2task_id['scenario'][sce_net_uuid][datacenter_id] = task_index
-                task_index += 1
-                db_vim_actions.append(db_vim_action)
-
-            if 'ip_profile' in sce_net:
-                db_ip_profile={
-                    'instance_net_id': net_uuid,
-                    'ip_version': sce_net['ip_profile']['ip_version'],
-                    'subnet_address': sce_net['ip_profile']['subnet_address'],
-                    'gateway_address': sce_net['ip_profile']['gateway_address'],
-                    'dns_address': sce_net['ip_profile']['dns_address'],
-                    'dhcp_enabled': sce_net['ip_profile']['dhcp_enabled'],
-                    'dhcp_start_address': sce_net['ip_profile']['dhcp_start_address'],
-                    'dhcp_count': sce_net['ip_profile']['dhcp_count'],
-                }
-                db_ip_profiles.append(db_ip_profile)
-
-        # Create VNFs
-        vnf_params = {
-            "default_datacenter_id": default_datacenter_id,
-            "myvim_threads_id": myvim_threads_id,
-            "instance_uuid": instance_uuid,
-            "instance_name": instance_name,
-            "instance_action_id": instance_action_id,
-            "myvims": myvims,
-            "cloud_config": cloud_config,
-            "RO_pub_key": tenant[0].get('RO_pub_key'),
-            "instance_parameters": instance_dict,
-        }
-        vnf_params_out = {
-            "task_index": task_index,
-            "uuid_list": uuid_list,
-            "db_instance_nets": db_instance_nets,
-            "db_vim_actions": db_vim_actions,
-            "db_ip_profiles": db_ip_profiles,
-            "db_instance_vnfs": db_instance_vnfs,
-            "db_instance_vms": db_instance_vms,
-            "db_instance_interfaces": db_instance_interfaces,
-            "net2task_id": net2task_id,
-            "sce_net2instance": sce_net2instance,
-        }
-        # sce_vnf_list = sorted(scenarioDict['vnfs'], key=lambda k: k['name'])
-        for sce_vnf in scenarioDict.get('vnfs', ()):  # sce_vnf_list:
-            instantiate_vnf(mydb, sce_vnf, vnf_params, vnf_params_out, rollbackList)
-        task_index = vnf_params_out["task_index"]
-        uuid_list = vnf_params_out["uuid_list"]
-
-        # Create VNFFGs
-        # task_depends_on = []
-        for vnffg in scenarioDict.get('vnffgs', ()):
-            for rsp in vnffg['rsps']:
-                sfs_created = []
-                for cp in rsp['connection_points']:
-                    count = mydb.get_rows(
-                            SELECT='vms.count',
-                            FROM="vms join interfaces on vms.uuid=interfaces.vm_id join sce_rsp_hops as h "
-                                 "on interfaces.uuid=h.ingress_interface_id",
-                            WHERE={'h.uuid': cp['uuid']})[0]['count']
-                    instance_vnf = next((item for item in db_instance_vnfs if item['sce_vnf_id'] == cp['sce_vnf_id']), None)
-                    instance_vms = [item for item in db_instance_vms if item['instance_vnf_id'] == instance_vnf['uuid']]
-                    dependencies = []
-                    for instance_vm in instance_vms:
-                        action = next((item for item in db_vim_actions if item['item_id'] == instance_vm['uuid']), None)
-                        if action:
-                            dependencies.append(action['task_index'])
-                        # TODO: throw exception if count != len(instance_vms)
-                        # TODO: and action shouldn't ever be None
-                    sfis_created = []
-                    for i in range(count):
-                        # create sfis
-                        sfi_uuid = str(uuid4())
-                        extra_params = {
-                            "ingress_interface_id": cp["ingress_interface_id"],
-                            "egress_interface_id": cp["egress_interface_id"]
-                        }
-                        uuid_list.append(sfi_uuid)
-                        db_sfi = {
-                            "uuid": sfi_uuid,
-                            "related": sfi_uuid,
-                            "instance_scenario_id": instance_uuid,
-                            'sce_rsp_hop_id': cp['uuid'],
-                            'datacenter_id': datacenter_id,
-                            'datacenter_tenant_id': myvim_thread_id,
-                            "vim_sfi_id": None, # vim thread will populate
-                        }
-                        db_instance_sfis.append(db_sfi)
-                        db_vim_action = {
-                            "instance_action_id": instance_action_id,
-                            "task_index": task_index,
-                            "datacenter_vim_id": myvim_thread_id,
-                            "action": "CREATE",
-                            "status": "SCHEDULED",
-                            "item": "instance_sfis",
-                            "item_id": sfi_uuid,
-                            "related": sfi_uuid,
-                            "extra": yaml.safe_dump({"params": extra_params, "depends_on": [dependencies[i]]},
-                                                    default_flow_style=True, width=256)
-                        }
-                        sfis_created.append(task_index)
-                        task_index += 1
-                        db_vim_actions.append(db_vim_action)
-                    # create sfs
-                    sf_uuid = str(uuid4())
-                    uuid_list.append(sf_uuid)
-                    db_sf = {
-                        "uuid": sf_uuid,
-                        "related": sf_uuid,
-                        "instance_scenario_id": instance_uuid,
-                        'sce_rsp_hop_id': cp['uuid'],
-                        'datacenter_id': datacenter_id,
-                        'datacenter_tenant_id': myvim_thread_id,
-                        "vim_sf_id": None, # vim thread will populate
-                    }
-                    db_instance_sfs.append(db_sf)
-                    db_vim_action = {
-                        "instance_action_id": instance_action_id,
-                        "task_index": task_index,
-                        "datacenter_vim_id": myvim_thread_id,
-                        "action": "CREATE",
-                        "status": "SCHEDULED",
-                        "item": "instance_sfs",
-                        "item_id": sf_uuid,
-                        "related": sf_uuid,
-                        "extra": yaml.safe_dump({"params": "", "depends_on": sfis_created},
-                                                default_flow_style=True, width=256)
-                    }
-                    sfs_created.append(task_index)
-                    task_index += 1
-                    db_vim_actions.append(db_vim_action)
-                classifier = rsp['classifier']
-
-                # TODO the following ~13 lines can be reused for the sfi case
-                count = mydb.get_rows(
-                        SELECT=('vms.count'),
-                        FROM="vms join interfaces on vms.uuid=interfaces.vm_id join sce_classifiers as c on interfaces.uuid=c.interface_id",
-                        WHERE={'c.uuid': classifier['uuid']})[0]['count']
-                instance_vnf = next((item for item in db_instance_vnfs if item['sce_vnf_id'] == classifier['sce_vnf_id']), None)
-                instance_vms = [item for item in db_instance_vms if item['instance_vnf_id'] == instance_vnf['uuid']]
-                dependencies = []
-                for instance_vm in instance_vms:
-                    action = next((item for item in db_vim_actions if item['item_id'] == instance_vm['uuid']), None)
-                    if action:
-                        dependencies.append(action['task_index'])
-                    # TODO: throw exception if count != len(instance_vms)
-                    # TODO: and action shouldn't ever be None
-                classifications_created = []
-                for i in range(count):
-                    for match in classifier['matches']:
-                        # create classifications
-                        classification_uuid = str(uuid4())
-                        uuid_list.append(classification_uuid)
-                        db_classification = {
-                            "uuid": classification_uuid,
-                            "related": classification_uuid,
-                            "instance_scenario_id": instance_uuid,
-                            'sce_classifier_match_id': match['uuid'],
-                            'datacenter_id': datacenter_id,
-                            'datacenter_tenant_id': myvim_thread_id,
-                            "vim_classification_id": None, # vim thread will populate
-                        }
-                        db_instance_classifications.append(db_classification)
-                        classification_params = {
-                            "ip_proto": match["ip_proto"],
-                            "source_ip": match["source_ip"],
-                            "destination_ip": match["destination_ip"],
-                            "source_port": match["source_port"],
-                            "destination_port": match["destination_port"]
-                        }
-                        db_vim_action = {
-                            "instance_action_id": instance_action_id,
-                            "task_index": task_index,
-                            "datacenter_vim_id": myvim_thread_id,
-                            "action": "CREATE",
-                            "status": "SCHEDULED",
-                            "item": "instance_classifications",
-                            "item_id": classification_uuid,
-                            "related": classification_uuid,
-                            "extra": yaml.safe_dump({"params": classification_params, "depends_on": [dependencies[i]]},
-                                                    default_flow_style=True, width=256)
-                        }
-                        classifications_created.append(task_index)
-                        task_index += 1
-                        db_vim_actions.append(db_vim_action)
-
-                # create sfps
-                sfp_uuid = str(uuid4())
-                uuid_list.append(sfp_uuid)
-                db_sfp = {
-                    "uuid": sfp_uuid,
-                    "related": sfp_uuid,
-                    "instance_scenario_id": instance_uuid,
-                    'sce_rsp_id': rsp['uuid'],
-                    'datacenter_id': datacenter_id,
-                    'datacenter_tenant_id': myvim_thread_id,
-                    "vim_sfp_id": None, # vim thread will populate
-                }
-                db_instance_sfps.append(db_sfp)
-                db_vim_action = {
-                    "instance_action_id": instance_action_id,
-                    "task_index": task_index,
-                    "datacenter_vim_id": myvim_thread_id,
-                    "action": "CREATE",
-                    "status": "SCHEDULED",
-                    "item": "instance_sfps",
-                    "item_id": sfp_uuid,
-                    "related": sfp_uuid,
-                    "extra": yaml.safe_dump({"params": "", "depends_on": sfs_created + classifications_created},
-                                            default_flow_style=True, width=256)
-                }
-                task_index += 1
-                db_vim_actions.append(db_vim_action)
-        db_instance_action["number_tasks"] = task_index
-
-        # --> WIM
-        logger.debug('wim_usage:\n%s\n\n', pformat(wim_usage))
-        wan_links = wim_engine.derive_wan_links(wim_usage, db_instance_nets, tenant_id)
-        wim_actions = wim_engine.create_actions(wan_links)
-        wim_actions, db_instance_action = (
-            wim_engine.incorporate_actions(wim_actions, db_instance_action))
-        # <-- WIM
-
-        scenarioDict["datacenter2tenant"] = myvim_threads_id
-
-        db_instance_scenario['datacenter_tenant_id'] = myvim_threads_id[default_datacenter_id]
-        db_instance_scenario['datacenter_id'] = default_datacenter_id
-        db_tables=[
-            {"instance_scenarios": db_instance_scenario},
-            {"instance_vnfs": db_instance_vnfs},
-            {"instance_nets": db_instance_nets},
-            {"ip_profiles": db_ip_profiles},
-            {"instance_vms": db_instance_vms},
-            {"instance_interfaces": db_instance_interfaces},
-            {"instance_actions": db_instance_action},
-            {"instance_sfis": db_instance_sfis},
-            {"instance_sfs": db_instance_sfs},
-            {"instance_classifications": db_instance_classifications},
-            {"instance_sfps": db_instance_sfps},
-            {"instance_wim_nets": wan_links},
-            {"vim_wim_actions": db_vim_actions + wim_actions}
-        ]
-
-        logger.debug("create_instance done DB tables: %s",
-                    yaml.safe_dump(db_tables, indent=4, default_flow_style=False) )
-        mydb.new_rows(db_tables, uuid_list)
-        for myvim_thread_id in myvim_threads_id.values():
-            vim_threads["running"][myvim_thread_id].insert_task(db_vim_actions)
-
-        wim_engine.dispatch(wim_actions)
-
-        returned_instance = mydb.get_instance_scenario(instance_uuid)
-        returned_instance["action_id"] = instance_action_id
-        return returned_instance
-    except (NfvoException, vimconn.vimconnException, wimconn.WimConnectorError, db_base_Exception) as e:
-        message = rollback(mydb, myvims, rollbackList)
-        if isinstance(e, db_base_Exception):
-            error_text = "database Exception"
-        elif isinstance(e, vimconn.vimconnException):
-            error_text = "VIM Exception"
-        elif isinstance(e, wimconn.WimConnectorError):
-            error_text = "WIM Exception"
-        else:
-            error_text = "Exception"
-        error_text += " {} {}. {}".format(type(e).__name__, str(e), message)
-        # logger.error("create_instance: %s", error_text)
-        logger.exception(e)
-        raise NfvoException(error_text, e.http_code)
-
-
-def instantiate_vnf(mydb, sce_vnf, params, params_out, rollbackList):
-    default_datacenter_id = params["default_datacenter_id"]
-    myvim_threads_id = params["myvim_threads_id"]
-    instance_uuid = params["instance_uuid"]
-    instance_name = params["instance_name"]
-    instance_action_id = params["instance_action_id"]
-    myvims = params["myvims"]
-    cloud_config = params["cloud_config"]
-    RO_pub_key = params["RO_pub_key"]
-
-    task_index = params_out["task_index"]
-    uuid_list = params_out["uuid_list"]
-    db_instance_nets = params_out["db_instance_nets"]
-    db_vim_actions = params_out["db_vim_actions"]
-    db_ip_profiles = params_out["db_ip_profiles"]
-    db_instance_vnfs = params_out["db_instance_vnfs"]
-    db_instance_vms = params_out["db_instance_vms"]
-    db_instance_interfaces = params_out["db_instance_interfaces"]
-    net2task_id = params_out["net2task_id"]
-    sce_net2instance = params_out["sce_net2instance"]
-
-    vnf_net2instance = {}
-
-    # 2. Creating new nets (vnf internal nets) in the VIM"
-    # For each vnf net, we create it and we add it to instanceNetlist.
-    if sce_vnf.get("datacenter"):
-        datacenter_id = sce_vnf["datacenter"]
-        myvim_thread_id = myvim_threads_id[sce_vnf["datacenter"]]
-    else:
-        datacenter_id = default_datacenter_id
-        myvim_thread_id = myvim_threads_id[default_datacenter_id]
-    for net in sce_vnf['nets']:
-        # TODO revis
-        # descriptor_net = instance_dict.get("vnfs", {}).get(sce_vnf["name"], {})
-        # net_name = descriptor_net.get("name")
-        net_name = None
-        if not net_name:
-            net_name = "{}-{}".format(instance_name, net["name"])
-            net_name = net_name[:255]  # limit length
-        net_type = net['type']
-
-        if sce_vnf['uuid'] not in vnf_net2instance:
-            vnf_net2instance[sce_vnf['uuid']] = {}
-        if sce_vnf['uuid'] not in net2task_id:
-            net2task_id[sce_vnf['uuid']] = {}
-        net2task_id[sce_vnf['uuid']][net['uuid']] = task_index
-
-        # fill database content
-        net_uuid = str(uuid4())
-        uuid_list.append(net_uuid)
-        vnf_net2instance[sce_vnf['uuid']][net['uuid']] = net_uuid
-        db_net = {
-            "uuid": net_uuid,
-            "related": net_uuid,
-            'vim_net_id': None,
-            "vim_name": net_name,
-            "instance_scenario_id": instance_uuid,
-            "net_id": net["uuid"],
-            "created": True,
-            'datacenter_id': datacenter_id,
-            'datacenter_tenant_id': myvim_thread_id,
-        }
-        db_instance_nets.append(db_net)
-
-        lookfor_filter = {}
-        if net.get("vim-network-name"):
-            lookfor_filter["name"] = net["vim-network-name"]
-        if net.get("vim-network-id"):
-            lookfor_filter["id"] = net["vim-network-id"]
-        if lookfor_filter:
-            task_action = "FIND"
-            task_extra = {"params": (lookfor_filter,)}
-        else:
-            task_action = "CREATE"
-            task_extra = {"params": (net_name, net_type, net.get('ip_profile', None))}
-
-        db_vim_action = {
-            "instance_action_id": instance_action_id,
-            "task_index": task_index,
-            "datacenter_vim_id": myvim_thread_id,
-            "status": "SCHEDULED",
-            "action": task_action,
-            "item": "instance_nets",
-            "item_id": net_uuid,
-            "related": net_uuid,
-            "extra": yaml.safe_dump(task_extra, default_flow_style=True, width=256)
-        }
-        task_index += 1
-        db_vim_actions.append(db_vim_action)
-
-        if 'ip_profile' in net:
-            db_ip_profile = {
-                'instance_net_id': net_uuid,
-                'ip_version': net['ip_profile']['ip_version'],
-                'subnet_address': net['ip_profile']['subnet_address'],
-                'gateway_address': net['ip_profile']['gateway_address'],
-                'dns_address': net['ip_profile']['dns_address'],
-                'dhcp_enabled': net['ip_profile']['dhcp_enabled'],
-                'dhcp_start_address': net['ip_profile']['dhcp_start_address'],
-                'dhcp_count': net['ip_profile']['dhcp_count'],
-            }
-            db_ip_profiles.append(db_ip_profile)
-
-    # print "vnf_net2instance:"
-    # print yaml.safe_dump(vnf_net2instance, indent=4, default_flow_style=False)
-
-    # 3. Creating new vm instances in the VIM
-    # myvim.new_vminstance(self,vimURI,tenant_id,name,description,image_id,flavor_id,net_dict)
-    ssh_access = None
-    if sce_vnf.get('mgmt_access'):
-        ssh_access = sce_vnf['mgmt_access'].get('config-access', {}).get('ssh-access')
-    vnf_availability_zones = []
-    for vm in sce_vnf.get('vms'):
-        vm_av = vm.get('availability_zone')
-        if vm_av and vm_av not in vnf_availability_zones:
-            vnf_availability_zones.append(vm_av)
-
-    # check if there is enough availability zones available at vim level.
-    if myvims[datacenter_id].availability_zone and vnf_availability_zones:
-        if len(vnf_availability_zones) > len(myvims[datacenter_id].availability_zone):
-            raise NfvoException('No enough availability zones at VIM for this deployment', httperrors.Bad_Request)
-
-    if sce_vnf.get("datacenter"):
-        vim = myvims[sce_vnf["datacenter"]]
-        myvim_thread_id = myvim_threads_id[sce_vnf["datacenter"]]
-        datacenter_id = sce_vnf["datacenter"]
-    else:
-        vim = myvims[default_datacenter_id]
-        myvim_thread_id = myvim_threads_id[default_datacenter_id]
-        datacenter_id = default_datacenter_id
-    sce_vnf["datacenter_id"] = datacenter_id
-    i = 0
-
-    vnf_uuid = str(uuid4())
-    uuid_list.append(vnf_uuid)
-    db_instance_vnf = {
-        'uuid': vnf_uuid,
-        'instance_scenario_id': instance_uuid,
-        'vnf_id': sce_vnf['vnf_id'],
-        'sce_vnf_id': sce_vnf['uuid'],
-        'datacenter_id': datacenter_id,
-        'datacenter_tenant_id': myvim_thread_id,
-    }
-    db_instance_vnfs.append(db_instance_vnf)
-
-    for vm in sce_vnf['vms']:
-        # skip PDUs
-        if vm.get("pdu_type"):
-            continue
-
-        myVMDict = {}
-        sce_vnf_name = sce_vnf['member_vnf_index'] if sce_vnf['member_vnf_index'] else sce_vnf['name']
-        myVMDict['name'] = "{}-{}-{}".format(instance_name[:64], sce_vnf_name[:64], vm["name"][:64])
-        myVMDict['description'] = myVMDict['name'][0:99]
-        #                if not startvms:
-        #                    myVMDict['start'] = "no"
-        if vm.get("instance_parameters") and vm["instance_parameters"].get("name"):
-            myVMDict['name'] = vm["instance_parameters"].get("name")
-        myVMDict['name'] = myVMDict['name'][0:255]  # limit name length
-        # create image at vim in case it not exist
-        image_uuid = vm['image_id']
-        if vm.get("image_list"):
-            for alternative_image in vm["image_list"]:
-                if alternative_image["vim_type"] == vim["config"]["_vim_type_internal"]:
-                    image_uuid = alternative_image['image_id']
-                    break
-        image_dict = mydb.get_table_by_uuid_name("images", image_uuid)
-        image_id = create_or_use_image(mydb, {datacenter_id: vim}, image_dict, [], True)
-        vm['vim_image_id'] = image_id
-
-        # create flavor at vim in case it not exist
-        flavor_dict = mydb.get_table_by_uuid_name("flavors", vm['flavor_id'])
-        if flavor_dict['extended'] != None:
-            flavor_dict['extended'] = yaml.load(flavor_dict['extended'])
-        flavor_id = create_or_use_flavor(mydb, {datacenter_id: vim}, flavor_dict, rollbackList, True)
-
-        # Obtain information for additional disks
-        extended_flavor_dict = mydb.get_rows(FROM='datacenters_flavors', SELECT=('extended',),
-                                             WHERE={'vim_id': flavor_id})
-        if not extended_flavor_dict:
-            raise NfvoException("flavor '{}' not found".format(flavor_id), httperrors.Not_Found)
-
-        # extended_flavor_dict_yaml = yaml.load(extended_flavor_dict[0])
-        myVMDict['disks'] = None
-        extended_info = extended_flavor_dict[0]['extended']
-        if extended_info != None:
-            extended_flavor_dict_yaml = yaml.load(extended_info)
-            if 'disks' in extended_flavor_dict_yaml:
-                myVMDict['disks'] = extended_flavor_dict_yaml['disks']
-                if vm.get("instance_parameters") and vm["instance_parameters"].get("devices"):
-                    for disk in myVMDict['disks']:
-                        if disk.get("name") in vm["instance_parameters"]["devices"]:
-                            disk.update(vm["instance_parameters"]["devices"][disk.get("name")])
-
-        vm['vim_flavor_id'] = flavor_id
-        myVMDict['imageRef'] = vm['vim_image_id']
-        myVMDict['flavorRef'] = vm['vim_flavor_id']
-        myVMDict['availability_zone'] = vm.get('availability_zone')
-        myVMDict['networks'] = []
-        task_depends_on = []
-        # TODO ALF. connect_mgmt_interfaces. Connect management interfaces if this is true
-        is_management_vm = False
-        db_vm_ifaces = []
-        for iface in vm['interfaces']:
-            netDict = {}
-            if iface['type'] == "data":
-                netDict['type'] = iface['model']
-            elif "model" in iface and iface["model"] != None:
-                netDict['model'] = iface['model']
-            # TODO in future, remove this because mac_address will not be set, and the type of PV,VF
-            # is obtained from iterface table model
-            # discover type of interface looking at flavor
-            for numa in flavor_dict.get('extended', {}).get('numas', []):
-                for flavor_iface in numa.get('interfaces', []):
-                    if flavor_iface.get('name') == iface['internal_name']:
-                        if flavor_iface['dedicated'] == 'yes':
-                            netDict['type'] = "PF"  # passthrough
-                        elif flavor_iface['dedicated'] == 'no':
-                            netDict['type'] = "VF"  # siov
-                        elif flavor_iface['dedicated'] == 'yes:sriov':
-                            netDict['type'] = "VFnotShared"  # sriov but only one sriov on the PF
-                        netDict["mac_address"] = flavor_iface.get("mac_address")
-                        break
-            netDict["use"] = iface['type']
-            if netDict["use"] == "data" and not netDict.get("type"):
-                # print "netDict", netDict
-                # print "iface", iface
-                e_text = "Cannot determine the interface type PF or VF of VNF '{}' VM '{}' iface '{}'".fromat(
-                    sce_vnf['name'], vm['name'], iface['internal_name'])
-                if flavor_dict.get('extended') == None:
-                    raise NfvoException(e_text + "After database migration some information is not available. \
-                            Try to delete and create the scenarios and VNFs again", httperrors.Conflict)
-                else:
-                    raise NfvoException(e_text, httperrors.Internal_Server_Error)
-            if netDict["use"] == "mgmt":
-                is_management_vm = True
-                netDict["type"] = "virtual"
-            if netDict["use"] == "bridge":
-                netDict["type"] = "virtual"
-            if iface.get("vpci"):
-                netDict['vpci'] = iface['vpci']
-            if iface.get("mac"):
-                netDict['mac_address'] = iface['mac']
-            if iface.get("mac_address"):
-                netDict['mac_address'] = iface['mac_address']
-            if iface.get("ip_address"):
-                netDict['ip_address'] = iface['ip_address']
-            if iface.get("port-security") is not None:
-                netDict['port_security'] = iface['port-security']
-            if iface.get("floating-ip") is not None:
-                netDict['floating_ip'] = iface['floating-ip']
-            netDict['name'] = iface['internal_name']
-            if iface['net_id'] is None:
-                for vnf_iface in sce_vnf["interfaces"]:
-                    # print iface
-                    # print vnf_iface
-                    if vnf_iface['interface_id'] == iface['uuid']:
-                        netDict['net_id'] = "TASK-{}".format(
-                            net2task_id['scenario'][vnf_iface['sce_net_id']][datacenter_id])
-                        instance_net_id = sce_net2instance[vnf_iface['sce_net_id']][datacenter_id]
-                        task_depends_on.append(net2task_id['scenario'][vnf_iface['sce_net_id']][datacenter_id])
-                        break
-            else:
-                netDict['net_id'] = "TASK-{}".format(net2task_id[sce_vnf['uuid']][iface['net_id']])
-                instance_net_id = vnf_net2instance[sce_vnf['uuid']][iface['net_id']]
-                task_depends_on.append(net2task_id[sce_vnf['uuid']][iface['net_id']])
-            # skip bridge ifaces not connected to any net
-            if 'net_id' not in netDict or netDict['net_id'] == None:
-                continue
-            myVMDict['networks'].append(netDict)
-            db_vm_iface = {
-                # "uuid"
-                # 'instance_vm_id': instance_vm_uuid,
-                "instance_net_id": instance_net_id,
-                'interface_id': iface['uuid'],
-                # 'vim_interface_id': ,
-                'type': 'external' if iface['external_name'] is not None else 'internal',
-                'ip_address': iface.get('ip_address'),
-                'mac_address': iface.get('mac'),
-                'floating_ip': int(iface.get('floating-ip', False)),
-                'port_security': int(iface.get('port-security', True))
-            }
-            db_vm_ifaces.append(db_vm_iface)
-        # print ">>>>>>>>>>>>>>>>>>>>>>>>>>>"
-        # print myVMDict['name']
-        # print "networks", yaml.safe_dump(myVMDict['networks'], indent=4, default_flow_style=False)
-        # print "interfaces", yaml.safe_dump(vm['interfaces'], indent=4, default_flow_style=False)
-        # print ">>>>>>>>>>>>>>>>>>>>>>>>>>>"
-
-        # We add the RO key to cloud_config if vnf will need ssh access
-        cloud_config_vm = cloud_config
-        if is_management_vm and params["instance_parameters"].get("mgmt_keys"):
-            cloud_config_vm = unify_cloud_config({"key-pairs": params["instance_parameters"]["mgmt_keys"]},
-                                                  cloud_config_vm)
-
-        if vm.get("instance_parameters") and "mgmt_keys" in vm["instance_parameters"]:
-            if vm["instance_parameters"]["mgmt_keys"]:
-                cloud_config_vm = unify_cloud_config({"key-pairs": vm["instance_parameters"]["mgmt_keys"]},
-                                                     cloud_config_vm)
-            if RO_pub_key:
-                cloud_config_vm = unify_cloud_config(cloud_config_vm, {"key-pairs": [RO_pub_key]})
-        if vm.get("boot_data"):
-            cloud_config_vm = unify_cloud_config(vm["boot_data"], cloud_config_vm)
-
-        if myVMDict.get('availability_zone'):
-            av_index = vnf_availability_zones.index(myVMDict['availability_zone'])
-        else:
-            av_index = None
-        for vm_index in range(0, vm.get('count', 1)):
-            vm_name = myVMDict['name'] + "-" + str(vm_index+1)
-            task_params = (vm_name, myVMDict['description'], myVMDict.get('start', None),
-                           myVMDict['imageRef'], myVMDict['flavorRef'], myVMDict['networks'], cloud_config_vm,
-                           myVMDict['disks'], av_index, vnf_availability_zones)
-            # put interface uuid back to scenario[vnfs][vms[[interfaces]
-            for net in myVMDict['networks']:
-                if "vim_id" in net:
-                    for iface in vm['interfaces']:
-                        if net["name"] == iface["internal_name"]:
-                            iface["vim_id"] = net["vim_id"]
-                            break
-            vm_uuid = str(uuid4())
-            uuid_list.append(vm_uuid)
-            db_vm = {
-                "uuid": vm_uuid,
-                "related": vm_uuid,
-                'instance_vnf_id': vnf_uuid,
-                # TODO delete "vim_vm_id": vm_id,
-                "vm_id": vm["uuid"],
-                "vim_name": vm_name,
-                # "status":
-            }
-            db_instance_vms.append(db_vm)
-
-            iface_index = 0
-            for db_vm_iface in db_vm_ifaces:
-                iface_uuid = str(uuid4())
-                uuid_list.append(iface_uuid)
-                db_vm_iface_instance = {
-                    "uuid": iface_uuid,
-                    "instance_vm_id": vm_uuid
-                }
-                db_vm_iface_instance.update(db_vm_iface)
-                if db_vm_iface_instance.get("ip_address"):  # increment ip_address
-                    ip = db_vm_iface_instance.get("ip_address")
-                    i = ip.rfind(".")
-                    if i > 0:
-                        try:
-                            i += 1
-                            ip = ip[i:] + str(int(ip[:i]) + 1)
-                            db_vm_iface_instance["ip_address"] = ip
-                        except:
-                            db_vm_iface_instance["ip_address"] = None
-                db_instance_interfaces.append(db_vm_iface_instance)
-                myVMDict['networks'][iface_index]["uuid"] = iface_uuid
-                iface_index += 1
-
-            db_vim_action = {
-                "instance_action_id": instance_action_id,
-                "task_index": task_index,
-                "datacenter_vim_id": myvim_thread_id,
-                "action": "CREATE",
-                "status": "SCHEDULED",
-                "item": "instance_vms",
-                "item_id": vm_uuid,
-                "related": vm_uuid,
-                "extra": yaml.safe_dump({"params": task_params, "depends_on": task_depends_on},
-                                        default_flow_style=True, width=256)
-            }
-            task_index += 1
-            db_vim_actions.append(db_vim_action)
-    params_out["task_index"] = task_index
-    params_out["uuid_list"] = uuid_list
-
-
-def delete_instance(mydb, tenant_id, instance_id):
-    # print "Checking that the instance_id exists and getting the instance dictionary"
-    instanceDict = mydb.get_instance_scenario(instance_id, tenant_id)
-    # print yaml.safe_dump(instanceDict, indent=4, default_flow_style=False)
-    tenant_id = instanceDict["tenant_id"]
-
-    # --> WIM
-    # We need to retrieve the WIM Actions now, before the instance_scenario is
-    # deleted. The reason for that is that: ON CASCADE rules will delete the
-    # instance_wim_nets record in the database
-    wim_actions = wim_engine.delete_actions(instance_scenario_id=instance_id)
-    # <-- WIM
-
-    # print "Checking that nfvo_tenant_id exists and getting the VIM URI and the VIM tenant_id"
-    # 1. Delete from Database
-    message = mydb.delete_instance_scenario(instance_id, tenant_id)
-
-    # 2. delete from VIM
-    error_msg = ""
-    myvims = {}
-    myvim_threads = {}
-    vimthread_affected = {}
-    net2vm_dependencies = {}
-
-    task_index = 0
-    instance_action_id = get_task_id()
-    db_vim_actions = []
-    db_instance_action = {
-        "uuid": instance_action_id,   # same uuid for the instance and the action on create
-        "tenant_id": tenant_id,
-        "instance_id": instance_id,
-        "description": "DELETE",
-        # "number_tasks": 0 # filled bellow
-    }
-
-    # 2.1 deleting VNFFGs
-    for sfp in instanceDict.get('sfps', ()):
-        vimthread_affected[sfp["datacenter_tenant_id"]] = None
-        datacenter_key = (sfp["datacenter_id"], sfp["datacenter_tenant_id"])
-        if datacenter_key not in myvims:
-            try:
-                _, myvim_thread = get_vim_thread(mydb, tenant_id, sfp["datacenter_id"], sfp["datacenter_tenant_id"])
-            except NfvoException as e:
-                logger.error(str(e))
-                myvim_thread = None
-            myvim_threads[datacenter_key] = myvim_thread
-            vims = get_vim(mydb, tenant_id, datacenter_id=sfp["datacenter_id"],
-                           datacenter_tenant_id=sfp["datacenter_tenant_id"])
-            if len(vims) == 0:
-                logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(sfp["datacenter_id"], sfp["datacenter_tenant_id"]))
-                myvims[datacenter_key] = None
-            else:
-                myvims[datacenter_key] = vims.values()[0]
-        myvim = myvims[datacenter_key]
-        myvim_thread = myvim_threads[datacenter_key]
-
-        if not myvim:
-            error_msg += "\n    vim_sfp_id={} cannot be deleted because datacenter={} not found".format(sfp['vim_sfp_id'], sfp["datacenter_id"])
-            continue
-        extra = {"params": (sfp['vim_sfp_id'])}
-        db_vim_action = {
-            "instance_action_id": instance_action_id,
-            "task_index": task_index,
-            "datacenter_vim_id": sfp["datacenter_tenant_id"],
-            "action": "DELETE",
-            "status": "SCHEDULED",
-            "item": "instance_sfps",
-            "item_id": sfp["uuid"],
-            "related": sfp["related"],
-            "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
-        }
-        task_index += 1
-        db_vim_actions.append(db_vim_action)
-
-    for classification in instanceDict['classifications']:
-        vimthread_affected[classification["datacenter_tenant_id"]] = None
-        datacenter_key = (classification["datacenter_id"], classification["datacenter_tenant_id"])
-        if datacenter_key not in myvims:
-            try:
-                _, myvim_thread = get_vim_thread(mydb, tenant_id, classification["datacenter_id"], classification["datacenter_tenant_id"])
-            except NfvoException as e:
-                logger.error(str(e))
-                myvim_thread = None
-            myvim_threads[datacenter_key] = myvim_thread
-            vims = get_vim(mydb, tenant_id, datacenter_id=classification["datacenter_id"],
-                           datacenter_tenant_id=classification["datacenter_tenant_id"])
-            if len(vims) == 0:
-                logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(classification["datacenter_id"],
-                                                                                               classification["datacenter_tenant_id"]))
-                myvims[datacenter_key] = None
-            else:
-                myvims[datacenter_key] = vims.values()[0]
-        myvim = myvims[datacenter_key]
-        myvim_thread = myvim_threads[datacenter_key]
-
-        if not myvim:
-            error_msg += "\n    vim_classification_id={} cannot be deleted because datacenter={} not found".format(classification['vim_classification_id'],
-                                                                                                                   classification["datacenter_id"])
-            continue
-        depends_on = [action["task_index"] for action in db_vim_actions if action["item"] == "instance_sfps"]
-        extra = {"params": (classification['vim_classification_id']), "depends_on": depends_on}
-        db_vim_action = {
-            "instance_action_id": instance_action_id,
-            "task_index": task_index,
-            "datacenter_vim_id": classification["datacenter_tenant_id"],
-            "action": "DELETE",
-            "status": "SCHEDULED",
-            "item": "instance_classifications",
-            "item_id": classification["uuid"],
-            "related": classification["related"],
-            "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
-        }
-        task_index += 1
-        db_vim_actions.append(db_vim_action)
-
-    for sf in instanceDict.get('sfs', ()):
-        vimthread_affected[sf["datacenter_tenant_id"]] = None
-        datacenter_key = (sf["datacenter_id"], sf["datacenter_tenant_id"])
-        if datacenter_key not in myvims:
-            try:
-                _, myvim_thread = get_vim_thread(mydb, tenant_id, sf["datacenter_id"], sf["datacenter_tenant_id"])
-            except NfvoException as e:
-                logger.error(str(e))
-                myvim_thread = None
-            myvim_threads[datacenter_key] = myvim_thread
-            vims = get_vim(mydb, tenant_id, datacenter_id=sf["datacenter_id"],
-                           datacenter_tenant_id=sf["datacenter_tenant_id"])
-            if len(vims) == 0:
-                logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(sf["datacenter_id"], sf["datacenter_tenant_id"]))
-                myvims[datacenter_key] = None
-            else:
-                myvims[datacenter_key] = vims.values()[0]
-        myvim = myvims[datacenter_key]
-        myvim_thread = myvim_threads[datacenter_key]
-
-        if not myvim:
-            error_msg += "\n    vim_sf_id={} cannot be deleted because datacenter={} not found".format(sf['vim_sf_id'], sf["datacenter_id"])
-            continue
-        depends_on = [action["task_index"] for action in db_vim_actions if action["item"] == "instance_sfps"]
-        extra = {"params": (sf['vim_sf_id']), "depends_on": depends_on}
-        db_vim_action = {
-            "instance_action_id": instance_action_id,
-            "task_index": task_index,
-            "datacenter_vim_id": sf["datacenter_tenant_id"],
-            "action": "DELETE",
-            "status": "SCHEDULED",
-            "item": "instance_sfs",
-            "item_id": sf["uuid"],
-            "related": sf["related"],
-            "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
-        }
-        task_index += 1
-        db_vim_actions.append(db_vim_action)
-
-    for sfi in instanceDict.get('sfis', ()):
-        vimthread_affected[sfi["datacenter_tenant_id"]] = None
-        datacenter_key = (sfi["datacenter_id"], sfi["datacenter_tenant_id"])
-        if datacenter_key not in myvims:
-            try:
-                _, myvim_thread = get_vim_thread(mydb, tenant_id, sfi["datacenter_id"], sfi["datacenter_tenant_id"])
-            except NfvoException as e:
-                logger.error(str(e))
-                myvim_thread = None
-            myvim_threads[datacenter_key] = myvim_thread
-            vims = get_vim(mydb, tenant_id, datacenter_id=sfi["datacenter_id"],
-                           datacenter_tenant_id=sfi["datacenter_tenant_id"])
-            if len(vims) == 0:
-                logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(sfi["datacenter_id"], sfi["datacenter_tenant_id"]))
-                myvims[datacenter_key] = None
-            else:
-                myvims[datacenter_key] = vims.values()[0]
-        myvim = myvims[datacenter_key]
-        myvim_thread = myvim_threads[datacenter_key]
-
-        if not myvim:
-            error_msg += "\n    vim_sfi_id={} cannot be deleted because datacenter={} not found".format(sfi['vim_sfi_id'], sfi["datacenter_id"])
-            continue
-        depends_on = [action["task_index"] for action in db_vim_actions if action["item"] == "instance_sfs"]
-        extra = {"params": (sfi['vim_sfi_id']), "depends_on": depends_on}
-        db_vim_action = {
-            "instance_action_id": instance_action_id,
-            "task_index": task_index,
-            "datacenter_vim_id": sfi["datacenter_tenant_id"],
-            "action": "DELETE",
-            "status": "SCHEDULED",
-            "item": "instance_sfis",
-            "item_id": sfi["uuid"],
-            "related": sfi["related"],
-            "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
-        }
-        task_index += 1
-        db_vim_actions.append(db_vim_action)
-
-    # 2.2 deleting VMs
-    # vm_fail_list=[]
-    for sce_vnf in instanceDict.get('vnfs', ()):
-        datacenter_key = (sce_vnf["datacenter_id"], sce_vnf["datacenter_tenant_id"])
-        vimthread_affected[sce_vnf["datacenter_tenant_id"]] = None
-        if datacenter_key not in myvims:
-            try:
-                _, myvim_thread = get_vim_thread(mydb, tenant_id, sce_vnf["datacenter_id"], sce_vnf["datacenter_tenant_id"])
-            except NfvoException as e:
-                logger.error(str(e))
-                myvim_thread = None
-            myvim_threads[datacenter_key] = myvim_thread
-            vims = get_vim(mydb, tenant_id, datacenter_id=sce_vnf["datacenter_id"],
-                           datacenter_tenant_id=sce_vnf["datacenter_tenant_id"])
-            if len(vims) == 0:
-                logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(sce_vnf["datacenter_id"],
-                                                                                               sce_vnf["datacenter_tenant_id"]))
-                myvims[datacenter_key] = None
-            else:
-                myvims[datacenter_key] = vims.values()[0]
-        myvim = myvims[datacenter_key]
-        myvim_thread = myvim_threads[datacenter_key]
-
-        for vm in sce_vnf['vms']:
-            if not myvim:
-                error_msg += "\n    VM id={} cannot be deleted because datacenter={} not found".format(vm['vim_vm_id'], sce_vnf["datacenter_id"])
-                continue
-            sfi_dependencies = [action["task_index"] for action in db_vim_actions if action["item"] == "instance_sfis"]
-            db_vim_action = {
-                "instance_action_id": instance_action_id,
-                "task_index": task_index,
-                "datacenter_vim_id": sce_vnf["datacenter_tenant_id"],
-                "action": "DELETE",
-                "status": "SCHEDULED",
-                "item": "instance_vms",
-                "item_id": vm["uuid"],
-                "related": vm["related"],
-                "extra": yaml.safe_dump({"params": vm["interfaces"], "depends_on": sfi_dependencies},
-                                        default_flow_style=True, width=256)
-            }
-            db_vim_actions.append(db_vim_action)
-            for interface in vm["interfaces"]:
-                if not interface.get("instance_net_id"):
-                    continue
-                if interface["instance_net_id"] not in net2vm_dependencies:
-                    net2vm_dependencies[interface["instance_net_id"]] = []
-                net2vm_dependencies[interface["instance_net_id"]].append(task_index)
-            task_index += 1
-
-    # 2.3 deleting NETS
-    # net_fail_list=[]
-    for net in instanceDict['nets']:
-        vimthread_affected[net["datacenter_tenant_id"]] = None
-        datacenter_key = (net["datacenter_id"], net["datacenter_tenant_id"])
-        if datacenter_key not in myvims:
-            try:
-                _,myvim_thread = get_vim_thread(mydb, tenant_id, net["datacenter_id"], net["datacenter_tenant_id"])
-            except NfvoException as e:
-                logger.error(str(e))
-                myvim_thread = None
-            myvim_threads[datacenter_key] = myvim_thread
-            vims = get_vim(mydb, tenant_id, datacenter_id=net["datacenter_id"],
-                           datacenter_tenant_id=net["datacenter_tenant_id"])
-            if len(vims) == 0:
-                logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(net["datacenter_id"], net["datacenter_tenant_id"]))
-                myvims[datacenter_key] = None
-            else:
-                myvims[datacenter_key] = vims.values()[0]
-        myvim = myvims[datacenter_key]
-        myvim_thread = myvim_threads[datacenter_key]
-
-        if not myvim:
-            error_msg += "\n    Net VIM_id={} cannot be deleted because datacenter={} not found".format(net['vim_net_id'], net["datacenter_id"])
-            continue
-        extra = {"params": (net['vim_net_id'], net['sdn_net_id'])}
-        if net2vm_dependencies.get(net["uuid"]):
-            extra["depends_on"] = net2vm_dependencies[net["uuid"]]
-        sfi_dependencies = [action["task_index"] for action in db_vim_actions if action["item"] == "instance_sfis"]
-        if len(sfi_dependencies) > 0:
-            if "depends_on" in extra:
-                extra["depends_on"] += sfi_dependencies
-            else:
-                extra["depends_on"] = sfi_dependencies
-        db_vim_action = {
-            "instance_action_id": instance_action_id,
-            "task_index": task_index,
-            "datacenter_vim_id": net["datacenter_tenant_id"],
-            "action": "DELETE",
-            "status": "SCHEDULED",
-            "item": "instance_nets",
-            "item_id": net["uuid"],
-            "related": net["related"],
-            "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
-        }
-        task_index += 1
-        db_vim_actions.append(db_vim_action)
-
-    db_instance_action["number_tasks"] = task_index
-
-    # --> WIM
-    wim_actions, db_instance_action = (
-        wim_engine.incorporate_actions(wim_actions, db_instance_action))
-    # <-- WIM
-
-    db_tables = [
-        {"instance_actions": db_instance_action},
-        {"vim_wim_actions": db_vim_actions + wim_actions}
-    ]
-
-    logger.debug("delete_instance done DB tables: %s",
-                 yaml.safe_dump(db_tables, indent=4, default_flow_style=False))
-    mydb.new_rows(db_tables, ())
-    for myvim_thread_id in vimthread_affected.keys():
-        vim_threads["running"][myvim_thread_id].insert_task(db_vim_actions)
-
-    wim_engine.dispatch(wim_actions)
-
-    if len(error_msg) > 0:
-        return 'action_id={} instance {} deleted but some elements could not be deleted, or already deleted '\
-               '(error: 404) from VIM: {}'.format(instance_action_id, message, error_msg)
-    else:
-        return "action_id={} instance {} deleted".format(instance_action_id, message)
-
-def get_instance_id(mydb, tenant_id, instance_id):
-    global ovim
-    #check valid tenant_id
-    check_tenant(mydb, tenant_id)
-    #obtain data
-
-    instance_dict = mydb.get_instance_scenario(instance_id, tenant_id, verbose=True)
-    for net in instance_dict["nets"]:
-        if net.get("sdn_net_id"):
-            net_sdn = ovim.show_network(net["sdn_net_id"])
-            net["sdn_info"] = {
-                "admin_state_up": net_sdn.get("admin_state_up"),
-                "flows": net_sdn.get("flows"),
-                "last_error": net_sdn.get("last_error"),
-                "ports": net_sdn.get("ports"),
-                "type": net_sdn.get("type"),
-                "status": net_sdn.get("status"),
-                "vlan": net_sdn.get("vlan"),
-            }
-    return instance_dict
-
-@deprecated("Instance is automatically refreshed by vim_threads")
-def refresh_instance(mydb, nfvo_tenant, instanceDict, datacenter=None, vim_tenant=None):
-    '''Refreshes a scenario instance. It modifies instanceDict'''
-    '''Returns:
-         - result: <0 if there is any unexpected error, n>=0 if no errors where n is the number of vms and nets that couldn't be updated in the database
-         - error_msg
-    '''
-    # # Assumption: nfvo_tenant and instance_id were checked before entering into this function
-    # #print "nfvo.refresh_instance begins"
-    # #print json.dumps(instanceDict, indent=4)
-    #
-    # #print "Getting the VIM URL and the VIM tenant_id"
-    # myvims={}
-    #
-    # # 1. Getting VIM vm and net list
-    # vms_updated = [] #List of VM instance uuids in openmano that were updated
-    # vms_notupdated=[]
-    # vm_list = {}
-    # for sce_vnf in instanceDict['vnfs']:
-    #     datacenter_key = (sce_vnf["datacenter_id"], sce_vnf["datacenter_tenant_id"])
-    #     if datacenter_key not in vm_list:
-    #         vm_list[datacenter_key] = []
-    #     if datacenter_key not in myvims:
-    #         vims = get_vim(mydb, nfvo_tenant, datacenter_id=sce_vnf["datacenter_id"],
-    #                        datacenter_tenant_id=sce_vnf["datacenter_tenant_id"])
-    #         if len(vims) == 0:
-    #             logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(sce_vnf["datacenter_id"], sce_vnf["datacenter_tenant_id"]))
-    #             myvims[datacenter_key] = None
-    #         else:
-    #             myvims[datacenter_key] = vims.values()[0]
-    #     for vm in sce_vnf['vms']:
-    #         vm_list[datacenter_key].append(vm['vim_vm_id'])
-    #         vms_notupdated.append(vm["uuid"])
-    #
-    # nets_updated = [] #List of VM instance uuids in openmano that were updated
-    # nets_notupdated=[]
-    # net_list = {}
-    # for net in instanceDict['nets']:
-    #     datacenter_key = (net["datacenter_id"], net["datacenter_tenant_id"])
-    #     if datacenter_key not in net_list:
-    #         net_list[datacenter_key] = []
-    #     if datacenter_key not in myvims:
-    #         vims = get_vim(mydb, nfvo_tenant, datacenter_id=net["datacenter_id"],
-    #                        datacenter_tenant_id=net["datacenter_tenant_id"])
-    #         if len(vims) == 0:
-    #             logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(net["datacenter_id"], net["datacenter_tenant_id"]))
-    #             myvims[datacenter_key] = None
-    #         else:
-    #             myvims[datacenter_key] = vims.values()[0]
-    #
-    #     net_list[datacenter_key].append(net['vim_net_id'])
-    #     nets_notupdated.append(net["uuid"])
-    #
-    # # 1. Getting the status of all VMs
-    # vm_dict={}
-    # for datacenter_key in myvims:
-    #     if not vm_list.get(datacenter_key):
-    #         continue
-    #     failed = True
-    #     failed_message=""
-    #     if not myvims[datacenter_key]:
-    #         failed_message = "datacenter '{}' with datacenter_tenant_id '{}' not found".format(net["datacenter_id"], net["datacenter_tenant_id"])
-    #     else:
-    #         try:
-    #             vm_dict.update(myvims[datacenter_key].refresh_vms_status(vm_list[datacenter_key]) )
-    #             failed = False
-    #         except vimconn.vimconnException as e:
-    #             logger.error("VIM exception %s %s", type(e).__name__, str(e))
-    #             failed_message = str(e)
-    #     if failed:
-    #         for vm in vm_list[datacenter_key]:
-    #             vm_dict[vm] = {'status': "VIM_ERROR", 'error_msg': failed_message}
-    #
-    # # 2. Update the status of VMs in the instanceDict, while collects the VMs whose status changed
-    # for sce_vnf in instanceDict['vnfs']:
-    #     for vm in sce_vnf['vms']:
-    #         vm_id = vm['vim_vm_id']
-    #         interfaces = vm_dict[vm_id].pop('interfaces', [])
-    #         #2.0 look if contain manamgement interface, and if not change status from ACTIVE:NoMgmtIP to ACTIVE
-    #         has_mgmt_iface = False
-    #         for iface in vm["interfaces"]:
-    #             if iface["type"]=="mgmt":
-    #                 has_mgmt_iface = True
-    #         if vm_dict[vm_id]['status'] == "ACTIVE:NoMgmtIP" and not has_mgmt_iface:
-    #             vm_dict[vm_id]['status'] = "ACTIVE"
-    #         if vm_dict[vm_id].get('error_msg') and len(vm_dict[vm_id]['error_msg']) >= 1024:
-    #             vm_dict[vm_id]['error_msg'] = vm_dict[vm_id]['error_msg'][:516] + " ... " + vm_dict[vm_id]['error_msg'][-500:]
-    #         if vm['status'] != vm_dict[vm_id]['status'] or vm.get('error_msg')!=vm_dict[vm_id].get('error_msg') or vm.get('vim_info')!=vm_dict[vm_id].get('vim_info'):
-    #             vm['status']    = vm_dict[vm_id]['status']
-    #             vm['error_msg'] = vm_dict[vm_id].get('error_msg')
-    #             vm['vim_info']  = vm_dict[vm_id].get('vim_info')
-    #             # 2.1. Update in openmano DB the VMs whose status changed
-    #             try:
-    #                 updates = mydb.update_rows('instance_vms', UPDATE=vm_dict[vm_id], WHERE={'uuid':vm["uuid"]})
-    #                 vms_notupdated.remove(vm["uuid"])
-    #                 if updates>0:
-    #                     vms_updated.append(vm["uuid"])
-    #             except db_base_Exception as e:
-    #                 logger.error("nfvo.refresh_instance error database update: %s", str(e))
-    #         # 2.2. Update in openmano DB the interface VMs
-    #         for interface in interfaces:
-    #             #translate from vim_net_id to instance_net_id
-    #             network_id_list=[]
-    #             for net in instanceDict['nets']:
-    #                 if net["vim_net_id"] == interface["vim_net_id"]:
-    #                     network_id_list.append(net["uuid"])
-    #             if not network_id_list:
-    #                 continue
-    #             del interface["vim_net_id"]
-    #             try:
-    #                 for network_id in network_id_list:
-    #                     mydb.update_rows('instance_interfaces', UPDATE=interface, WHERE={'instance_vm_id':vm["uuid"], "instance_net_id":network_id})
-    #             except db_base_Exception as e:
-    #                 logger.error( "nfvo.refresh_instance error with vm=%s, interface_net_id=%s", vm["uuid"], network_id)
-    #
-    # # 3. Getting the status of all nets
-    # net_dict = {}
-    # for datacenter_key in myvims:
-    #     if not net_list.get(datacenter_key):
-    #         continue
-    #     failed = True
-    #     failed_message = ""
-    #     if not myvims[datacenter_key]:
-    #         failed_message = "datacenter '{}' with datacenter_tenant_id '{}' not found".format(net["datacenter_id"], net["datacenter_tenant_id"])
-    #     else:
-    #         try:
-    #             net_dict.update(myvims[datacenter_key].refresh_nets_status(net_list[datacenter_key]) )
-    #             failed = False
-    #         except vimconn.vimconnException as e:
-    #             logger.error("VIM exception %s %s", type(e).__name__, str(e))
-    #             failed_message = str(e)
-    #     if failed:
-    #         for net in net_list[datacenter_key]:
-    #             net_dict[net] = {'status': "VIM_ERROR", 'error_msg': failed_message}
-    #
-    # # 4. Update the status of nets in the instanceDict, while collects the nets whose status changed
-    # # TODO: update nets inside a vnf
-    # for net in instanceDict['nets']:
-    #     net_id = net['vim_net_id']
-    #     if net_dict[net_id].get('error_msg') and len(net_dict[net_id]['error_msg']) >= 1024:
-    #         net_dict[net_id]['error_msg'] = net_dict[net_id]['error_msg'][:516] + " ... " + net_dict[vm_id]['error_msg'][-500:]
-    #     if net['status'] != net_dict[net_id]['status'] or net.get('error_msg')!=net_dict[net_id].get('error_msg') or net.get('vim_info')!=net_dict[net_id].get('vim_info'):
-    #         net['status']    = net_dict[net_id]['status']
-    #         net['error_msg'] = net_dict[net_id].get('error_msg')
-    #         net['vim_info']  = net_dict[net_id].get('vim_info')
-    #         # 5.1. Update in openmano DB the nets whose status changed
-    #         try:
-    #             updated = mydb.update_rows('instance_nets', UPDATE=net_dict[net_id], WHERE={'uuid':net["uuid"]})
-    #             nets_notupdated.remove(net["uuid"])
-    #             if updated>0:
-    #                 nets_updated.append(net["uuid"])
-    #         except db_base_Exception as e:
-    #             logger.error("nfvo.refresh_instance error database update: %s", str(e))
-    #
-    # # Returns appropriate output
-    # #print "nfvo.refresh_instance finishes"
-    # logger.debug("VMs updated in the database: %s; nets updated in the database %s; VMs not updated: %s; nets not updated: %s",
-    #             str(vms_updated), str(nets_updated), str(vms_notupdated), str(nets_notupdated))
-    instance_id = instanceDict['uuid']
-    # if len(vms_notupdated)+len(nets_notupdated)>0:
-    #     error_msg = "VMs not updated: " + str(vms_notupdated) + "; nets not updated: " + str(nets_notupdated)
-    #     return len(vms_notupdated)+len(nets_notupdated), 'Scenario instance ' + instance_id + ' refreshed but some elements could not be updated in the database: ' + error_msg
-
-    return 0, 'Scenario instance ' + instance_id + ' refreshed.'
-
-def instance_action(mydb,nfvo_tenant,instance_id, action_dict):
-    #print "Checking that the instance_id exists and getting the instance dictionary"
-    instanceDict = mydb.get_instance_scenario(instance_id, nfvo_tenant)
-    #print yaml.safe_dump(instanceDict, indent=4, default_flow_style=False)
-
-    #print "Checking that nfvo_tenant_id exists and getting the VIM URI and the VIM tenant_id"
-    vims = get_vim(mydb, nfvo_tenant, instanceDict['datacenter_id'])
-    if len(vims) == 0:
-        raise NfvoException("datacenter '{}' not found".format(str(instanceDict['datacenter_id'])), httperrors.Not_Found)
-    myvim = vims.values()[0]
-    vm_result = {}
-    vm_error = 0
-    vm_ok = 0
-
-    myvim_threads_id = {}
-    if action_dict.get("vdu-scaling"):
-        db_instance_vms = []
-        db_vim_actions = []
-        db_instance_interfaces = []
-        instance_action_id = get_task_id()
-        db_instance_action = {
-            "uuid": instance_action_id,   # same uuid for the instance and the action on create
-            "tenant_id": nfvo_tenant,
-            "instance_id": instance_id,
-            "description": "SCALE",
-        }
-        vm_result["instance_action_id"] = instance_action_id
-        vm_result["created"] = []
-        vm_result["deleted"] = []
-        task_index = 0
-        for vdu in action_dict["vdu-scaling"]:
-            vdu_id = vdu.get("vdu-id")
-            osm_vdu_id = vdu.get("osm_vdu_id")
-            member_vnf_index = vdu.get("member-vnf-index")
-            vdu_count = vdu.get("count", 1)
-            if vdu_id:
-                target_vms = mydb.get_rows(
-                    FROM="instance_vms as vms join instance_vnfs as vnfs on vms.instance_vnf_id=vnfs.uuid",
-                    WHERE={"vms.uuid": vdu_id},
-                    ORDER_BY="vms.created_at"
-                )
-                if not target_vms:
-                    raise NfvoException("Cannot find the vdu with id {}".format(vdu_id), httperrors.Not_Found)
-            else:
-                if not osm_vdu_id and not member_vnf_index:
-                    raise NfvoException("Invalid input vdu parameters. Must supply either 'vdu-id' of 'osm_vdu_id','member-vnf-index'")
-                target_vms = mydb.get_rows(
-                    # SELECT=("ivms.uuid", "ivnfs.datacenter_id", "ivnfs.datacenter_tenant_id"),
-                    FROM="instance_vms as ivms join instance_vnfs as ivnfs on ivms.instance_vnf_id=ivnfs.uuid"\
-                         " join sce_vnfs as svnfs on ivnfs.sce_vnf_id=svnfs.uuid"\
-                         " join vms on ivms.vm_id=vms.uuid",
-                    WHERE={"vms.osm_id": osm_vdu_id, "svnfs.member_vnf_index": member_vnf_index,
-                           "ivnfs.instance_scenario_id": instance_id},
-                    ORDER_BY="ivms.created_at"
-                )
-                if not target_vms:
-                    raise NfvoException("Cannot find the vdu with osm_vdu_id {} and member-vnf-index {}".format(osm_vdu_id, member_vnf_index), httperrors.Not_Found)
-                vdu_id = target_vms[-1]["uuid"]
-            target_vm = target_vms[-1]
-            datacenter = target_vm["datacenter_id"]
-            myvim_threads_id[datacenter], _ = get_vim_thread(mydb, nfvo_tenant, datacenter)
-
-            if vdu["type"] == "delete":
-                for index in range(0, vdu_count):
-                    target_vm = target_vms[-1-index]
-                    vdu_id = target_vm["uuid"]
-                    # look for nm
-                    vm_interfaces = None
-                    for sce_vnf in instanceDict['vnfs']:
-                        for vm in sce_vnf['vms']:
-                            if vm["uuid"] == vdu_id:
-                                # TODO revise this should not be vm["uuid"]   instance_vms["vm_id"]
-                                vm_interfaces = vm["interfaces"]
-                                break
-
-                    db_vim_action = {
-                        "instance_action_id": instance_action_id,
-                        "task_index": task_index,
-                        "datacenter_vim_id": target_vm["datacenter_tenant_id"],
-                        "action": "DELETE",
-                        "status": "SCHEDULED",
-                        "item": "instance_vms",
-                        "item_id": vdu_id,
-                        "related": target_vm["related"],
-                        "extra": yaml.safe_dump({"params": vm_interfaces},
-                                                default_flow_style=True, width=256)
-                    }
-                    task_index += 1
-                    db_vim_actions.append(db_vim_action)
-                    vm_result["deleted"].append(vdu_id)
-                    # delete from database
-                    db_instance_vms.append({"TO-DELETE": vdu_id})
-
-            else:  # vdu["type"] == "create":
-                iface2iface = {}
-                where = {"item": "instance_vms", "item_id": target_vm["uuid"], "action": "CREATE"}
-
-                vim_action_to_clone = mydb.get_rows(FROM="vim_wim_actions", WHERE=where)
-                if not vim_action_to_clone:
-                    raise NfvoException("Cannot find the vim_action at database with {}".format(where), httperrors.Internal_Server_Error)
-                vim_action_to_clone = vim_action_to_clone[0]
-                extra = yaml.safe_load(vim_action_to_clone["extra"])
-
-                # generate a new depends_on. Convert format TASK-Y into new format TASK-ACTION-XXXX.XXXX.Y
-                # TODO do the same for flavor and image when available
-                task_depends_on = []
-                task_params = extra["params"]
-                task_params_networks = deepcopy(task_params[5])
-                for iface in task_params[5]:
-                    if iface["net_id"].startswith("TASK-"):
-                        if "." not in iface["net_id"]:
-                            task_depends_on.append("{}.{}".format(vim_action_to_clone["instance_action_id"],
-                                                             iface["net_id"][5:]))
-                            iface["net_id"] = "TASK-{}.{}".format(vim_action_to_clone["instance_action_id"],
-                                                                  iface["net_id"][5:])
-                        else:
-                            task_depends_on.append(iface["net_id"][5:])
-                    if "mac_address" in iface:
-                        del iface["mac_address"]
-
-                vm_ifaces_to_clone = mydb.get_rows(FROM="instance_interfaces", WHERE={"instance_vm_id": target_vm["uuid"]})
-                for index in range(0, vdu_count):
-                    vm_uuid = str(uuid4())
-                    vm_name = target_vm.get('vim_name')
-                    try:
-                        suffix = vm_name.rfind("-")
-                        vm_name = vm_name[:suffix+1] + str(index + 1 + int(vm_name[suffix+1:]))
-                    except Exception:
-                        pass
-                    db_instance_vm = {
-                        "uuid": vm_uuid,
-                        'related': vm_uuid,
-                        'instance_vnf_id': target_vm['instance_vnf_id'],
-                        'vm_id': target_vm['vm_id'],
-                        'vim_name': vm_name,
-                    }
-                    db_instance_vms.append(db_instance_vm)
-
-                    for vm_iface in vm_ifaces_to_clone:
-                        iface_uuid = str(uuid4())
-                        iface2iface[vm_iface["uuid"]] = iface_uuid
-                        db_vm_iface = {
-                            "uuid": iface_uuid,
-                            'instance_vm_id': vm_uuid,
-                            "instance_net_id": vm_iface["instance_net_id"],
-                            'interface_id': vm_iface['interface_id'],
-                            'type': vm_iface['type'],
-                            'floating_ip': vm_iface['floating_ip'],
-                            'port_security': vm_iface['port_security']
-                        }
-                        db_instance_interfaces.append(db_vm_iface)
-                    task_params_copy = deepcopy(task_params)
-                    for iface in task_params_copy[5]:
-                        iface["uuid"] = iface2iface[iface["uuid"]]
-                        # increment ip_address
-                        if "ip_address" in iface:
-                            ip = iface.get("ip_address")
-                            i = ip.rfind(".")
-                            if i > 0:
-                                try:
-                                    i += 1
-                                    ip = ip[i:] + str(int(ip[:i]) + 1)
-                                    iface["ip_address"] = ip
-                                except:
-                                    iface["ip_address"] = None
-                    if vm_name:
-                        task_params_copy[0] = vm_name
-                    db_vim_action = {
-                        "instance_action_id": instance_action_id,
-                        "task_index": task_index,
-                        "datacenter_vim_id": vim_action_to_clone["datacenter_vim_id"],
-                        "action": "CREATE",
-                        "status": "SCHEDULED",
-                        "item": "instance_vms",
-                        "item_id": vm_uuid,
-                        "related": vm_uuid,
-                        # ALF
-                        # ALF
-                        # TODO examinar parametros, quitar MAC o incrementar. Incrementar IP y colocar las dependencias con ACTION-asdfasd.
-                        # ALF
-                        # ALF
-                        "extra": yaml.safe_dump({"params": task_params_copy, "depends_on": task_depends_on}, default_flow_style=True, width=256)
-                    }
-                    task_index += 1
-                    db_vim_actions.append(db_vim_action)
-                    vm_result["created"].append(vm_uuid)
-
-        db_instance_action["number_tasks"] = task_index
-        db_tables = [
-            {"instance_vms": db_instance_vms},
-            {"instance_interfaces": db_instance_interfaces},
-            {"instance_actions": db_instance_action},
-            # TODO revise sfps
-            # {"instance_sfis": db_instance_sfis},
-            # {"instance_sfs": db_instance_sfs},
-            # {"instance_classifications": db_instance_classifications},
-            # {"instance_sfps": db_instance_sfps},
-            {"vim_wim_actions": db_vim_actions}
-        ]
-        logger.debug("create_vdu done DB tables: %s",
-                     yaml.safe_dump(db_tables, indent=4, default_flow_style=False))
-        mydb.new_rows(db_tables, [])
-        for myvim_thread in myvim_threads_id.values():
-            vim_threads["running"][myvim_thread].insert_task(db_vim_actions)
-
-        return vm_result
-
-    input_vnfs = action_dict.pop("vnfs", [])
-    input_vms = action_dict.pop("vms", [])
-    action_over_all = True if not input_vnfs and not input_vms else False
-    for sce_vnf in instanceDict['vnfs']:
-        for vm in sce_vnf['vms']:
-            if not action_over_all and sce_vnf['uuid'] not in input_vnfs and sce_vnf['vnf_name'] not in input_vnfs and \
-                    sce_vnf['member_vnf_index'] not in input_vnfs and \
-                    vm['uuid'] not in input_vms and vm['name'] not in input_vms and \
-                    sce_vnf['member_vnf_index'] + "-" + vm['vdu_osm_id'] not in input_vms:  # TODO conside vm_count_index
-                continue
-            try:
-                if "add_public_key" in action_dict:
-                    if sce_vnf.get('mgmt_access'):
-                        mgmt_access = yaml.load(sce_vnf['mgmt_access'])
-                        if not input_vms and mgmt_access.get("vdu-id") != vm['vdu_osm_id']:
-                            continue
-                        default_user = mgmt_access.get("default-user")
-                        password = mgmt_access.get("password")
-                        if mgmt_access.get(vm['vdu_osm_id']):
-                            default_user = mgmt_access[vm['vdu_osm_id']].get("default-user", default_user)
-                            password = mgmt_access[vm['vdu_osm_id']].get("password", password)
-
-                        tenant = mydb.get_rows_by_id('nfvo_tenants', nfvo_tenant)
-                        try:
-                            if 'ip_address' in vm:
-                                    mgmt_ip = vm['ip_address'].split(';')
-                                    priv_RO_key = decrypt_key(tenant[0]['encrypted_RO_priv_key'], tenant[0]['uuid'])
-                                    data  = myvim.inject_user_key(mgmt_ip[0], action_dict.get('user', default_user),
-                                                          action_dict['add_public_key'],
-                                                          password=password, ro_key=priv_RO_key)
-                                    vm_result[ vm['uuid'] ] = {"vim_result": 200,
-                                                       "description": "Public key injected",
-                                                       "name":vm['name']
-                                                    }
-
-                        except KeyError:
-                            raise NfvoException("Unable to inject ssh key in vm: {} - Aborting".format(vm['uuid']),
-                                                httperrors.Internal_Server_Error)
-                    else:
-                        raise NfvoException("Unable to inject ssh key in vm: {} - Aborting".format(vm['uuid']),
-                                            httperrors.Internal_Server_Error)
-                else:
-                    data = myvim.action_vminstance(vm['vim_vm_id'], action_dict)
-                    if "console" in action_dict:
-                        if not global_config["http_console_proxy"]:
-                            vm_result[ vm['uuid'] ] = {"vim_result": 200,
-                                                       "description": "{protocol}//{ip}:{port}/{suffix}".format(
-                                                                                    protocol=data["protocol"],
-                                                                                    ip = data["server"],
-                                                                                    port = data["port"],
-                                                                                    suffix = data["suffix"]),
-                                                       "name":vm['name']
-                                                    }
-                            vm_ok +=1
-                        elif data["server"]=="127.0.0.1" or data["server"]=="localhost":
-                            vm_result[ vm['uuid'] ] = {"vim_result": -httperrors.Unauthorized,
-                                                       "description": "this console is only reachable by local interface",
-                                                       "name":vm['name']
-                                                    }
-                            vm_error+=1
-                        else:
-                        #print "console data", data
-                            try:
-                                console_thread = create_or_use_console_proxy_thread(data["server"], data["port"])
-                                vm_result[ vm['uuid'] ] = {"vim_result": 200,
-                                                           "description": "{protocol}//{ip}:{port}/{suffix}".format(
-                                                                                        protocol=data["protocol"],
-                                                                                        ip = global_config["http_console_host"],
-                                                                                        port = console_thread.port,
-                                                                                        suffix = data["suffix"]),
-                                                           "name":vm['name']
-                                                        }
-                                vm_ok +=1
-                            except NfvoException as e:
-                                vm_result[ vm['uuid'] ] = {"vim_result": e.http_code, "name":vm['name'], "description": str(e)}
-                                vm_error+=1
-
-                    else:
-                        vm_result[ vm['uuid'] ] = {"vim_result": 200, "description": "ok", "name":vm['name']}
-                        vm_ok +=1
-            except vimconn.vimconnException as e:
-                vm_result[ vm['uuid'] ] = {"vim_result": e.http_code, "name":vm['name'], "description": str(e)}
-                vm_error+=1
-
-    if vm_ok==0: #all goes wrong
-        return vm_result
-    else:
-        return vm_result
-
-def instance_action_get(mydb, nfvo_tenant, instance_id, action_id):
-    filter = {}
-    if nfvo_tenant and nfvo_tenant != "any":
-        filter["tenant_id"] = nfvo_tenant
-    if instance_id and instance_id != "any":
-        filter["instance_id"] = instance_id
-    if action_id:
-        filter["uuid"] = action_id
-    rows = mydb.get_rows(FROM="instance_actions", WHERE=filter)
-    if action_id:
-        if not rows:
-            raise NfvoException("Not found any action with this criteria", httperrors.Not_Found)
-        vim_wim_actions = mydb.get_rows(FROM="vim_wim_actions", WHERE={"instance_action_id": action_id})
-        rows[0]["vim_wim_actions"] = vim_wim_actions
-        # for backward compatibility set vim_actions = vim_wim_actions
-        rows[0]["vim_actions"] = vim_wim_actions
-    return {"actions": rows}
-
-
-def create_or_use_console_proxy_thread(console_server, console_port):
-    #look for a non-used port
-    console_thread_key = console_server + ":" + str(console_port)
-    if console_thread_key in global_config["console_thread"]:
-        #global_config["console_thread"][console_thread_key].start_timeout()
-        return global_config["console_thread"][console_thread_key]
-
-    for port in  global_config["console_port_iterator"]():
-        #print "create_or_use_console_proxy_thread() port:", port
-        if port in global_config["console_ports"]:
-            continue
-        try:
-            clithread = cli.ConsoleProxyThread(global_config['http_host'], port, console_server, console_port)
-            clithread.start()
-            global_config["console_thread"][console_thread_key] = clithread
-            global_config["console_ports"][port] = console_thread_key
-            return clithread
-        except cli.ConsoleProxyExceptionPortUsed as e:
            #port used, try with another
-            continue
-        except cli.ConsoleProxyException as e:
-            raise NfvoException(str(e), httperrors.Bad_Request)
-    raise NfvoException("Not found any free 'http_console_ports'", httperrors.Conflict)
-
-
-def check_tenant(mydb, tenant_id):
-    '''check that tenant exists at database'''
-    tenant = mydb.get_rows(FROM='nfvo_tenants', SELECT=('uuid',), WHERE={'uuid': tenant_id})
-    if not tenant:
-        raise NfvoException("tenant '{}' not found".format(tenant_id), httperrors.Not_Found)
-    return
-
-def new_tenant(mydb, tenant_dict):
-
-    tenant_uuid = str(uuid4())
-    tenant_dict['uuid'] = tenant_uuid
-    try:
-        pub_key, priv_key = create_RO_keypair(tenant_uuid)
-        tenant_dict['RO_pub_key'] = pub_key
-        tenant_dict['encrypted_RO_priv_key'] = priv_key
-        mydb.new_row("nfvo_tenants", tenant_dict, confidential_data=True)
-    except db_base_Exception as e:
-        raise NfvoException("Error creating the new tenant: {} ".format(tenant_dict['name']) + str(e), e.http_code)
-    return tenant_uuid
-
-def delete_tenant(mydb, tenant):
-    #get nfvo_tenant info
-
-    tenant_dict = mydb.get_table_by_uuid_name('nfvo_tenants', tenant, 'tenant')
-    mydb.delete_row_by_id("nfvo_tenants", tenant_dict['uuid'])
-    return tenant_dict['uuid'] + " " + tenant_dict["name"]
-
-
-def new_datacenter(mydb, datacenter_descriptor):
-    sdn_port_mapping = None
-    if "config" in datacenter_descriptor:
-        sdn_port_mapping = datacenter_descriptor["config"].pop("sdn-port-mapping", None)
-        datacenter_descriptor["config"] = yaml.safe_dump(datacenter_descriptor["config"], default_flow_style=True,
-                                                         width=256)
-    # Check that datacenter-type is correct
-    datacenter_type = datacenter_descriptor.get("type", "openvim");
-    # module_info = None
-    try:
-        module = "vimconn_" + datacenter_type
-        pkg = __import__("osm_ro." + module)
-        # vim_conn = getattr(pkg, module)
-        # module_info = imp.find_module(module, [__file__[:__file__.rfind("/")]])
-    except (IOError, ImportError):
-        # if module_info and module_info[0]:
-        #    file.close(module_info[0])
-        raise NfvoException("Incorrect datacenter type '{}'. Plugin '{}.py' not installed".format(datacenter_type,
-                                                                                                  module),
-                            httperrors.Bad_Request)
-
-    datacenter_id = mydb.new_row("datacenters", datacenter_descriptor, add_uuid=True, confidential_data=True)
-    if sdn_port_mapping:
-        try:
-            datacenter_sdn_port_mapping_set(mydb, None, datacenter_id, sdn_port_mapping)
-        except Exception as e:
-            mydb.delete_row_by_id("datacenters", datacenter_id)   # Rollback
-            raise e
-    return datacenter_id
-
-
-def edit_datacenter(mydb, datacenter_id_name, datacenter_descriptor):
-    # obtain data, check that only one exist
-    datacenter = mydb.get_table_by_uuid_name('datacenters', datacenter_id_name)
-
-    # edit data
-    datacenter_id = datacenter['uuid']
-    where = {'uuid': datacenter['uuid']}
-    remove_port_mapping = False
-    new_sdn_port_mapping = None
-    if "config" in datacenter_descriptor:
-        if datacenter_descriptor['config'] != None:
-            try:
-                new_config_dict = datacenter_descriptor["config"]
-                if "sdn-port-mapping" in new_config_dict:
-                    remove_port_mapping = True
-                    new_sdn_port_mapping = new_config_dict.pop("sdn-port-mapping")
-                # delete null fields
-                to_delete = []
-                for k in new_config_dict:
-                    if new_config_dict[k] is None:
-                        to_delete.append(k)
-                        if k == 'sdn-controller':
-                            remove_port_mapping = True
-
-                config_text = datacenter.get("config")
-                if not config_text:
-                    config_text = '{}'
-                config_dict = yaml.load(config_text)
-                config_dict.update(new_config_dict)
-                # delete null fields
-                for k in to_delete:
-                    del config_dict[k]
-            except Exception as e:
-                raise NfvoException("Bad format at datacenter:config " + str(e), httperrors.Bad_Request)
-        if config_dict:
-            datacenter_descriptor["config"] = yaml.safe_dump(config_dict, default_flow_style=True, width=256)
-        else:
-            datacenter_descriptor["config"] = None
-        if remove_port_mapping:
-            try:
-                datacenter_sdn_port_mapping_delete(mydb, None, datacenter_id)
-            except ovimException as e:
-                raise NfvoException("Error deleting datacenter-port-mapping " + str(e), httperrors.Conflict)
-
-    mydb.update_rows('datacenters', datacenter_descriptor, where)
-    if new_sdn_port_mapping:
-        try:
-            datacenter_sdn_port_mapping_set(mydb, None, datacenter_id, new_sdn_port_mapping)
-        except ovimException as e:
-            # Rollback
-            mydb.update_rows('datacenters', datacenter, where)
-            raise NfvoException("Error adding datacenter-port-mapping " + str(e), httperrors.Conflict)
-    return datacenter_id
-
-
-def delete_datacenter(mydb, datacenter):
-    #get nfvo_tenant info
-    datacenter_dict = mydb.get_table_by_uuid_name('datacenters', datacenter, 'datacenter')
-    mydb.delete_row_by_id("datacenters", datacenter_dict['uuid'])
-    try:
-        datacenter_sdn_port_mapping_delete(mydb, None, datacenter_dict['uuid'])
-    except ovimException as e:
-        raise NfvoException("Error deleting datacenter-port-mapping " + str(e))
-    return datacenter_dict['uuid'] + " " + datacenter_dict['name']
-
-
-def create_vim_account(mydb, nfvo_tenant, datacenter_id, name=None, vim_id=None, vim_tenant=None, vim_tenant_name=None,
-                       vim_username=None, vim_password=None, config=None):
-    # get datacenter info
-    try:
-        if not datacenter_id:
-            if not vim_id:
-                raise NfvoException("You must provide 'vim_id", http_code=httperrors.Bad_Request)
-            datacenter_id = vim_id
-        datacenter_id, datacenter_name = get_datacenter_uuid(mydb, None, datacenter_id)
-
-        create_vim_tenant = True if not vim_tenant and not vim_tenant_name else False
-
-        # get nfvo_tenant info
-        tenant_dict = mydb.get_table_by_uuid_name('nfvo_tenants', nfvo_tenant)
-        if vim_tenant_name==None:
-            vim_tenant_name=tenant_dict['name']
-
-        tenants_datacenter_dict={"nfvo_tenant_id":tenant_dict['uuid'], "datacenter_id":datacenter_id }
-        # #check that this association does not exist before
-        # tenants_datacenters = mydb.get_rows(FROM='tenants_datacenters', WHERE=tenants_datacenter_dict)
-        # if len(tenants_datacenters)>0:
-        #     raise NfvoException("datacenter '{}' and tenant'{}' are already attached".format(datacenter_id, tenant_dict['uuid']), httperrors.Conflict)
-
-        vim_tenant_id_exist_atdb=False
-        if not create_vim_tenant:
-            where_={"datacenter_id": datacenter_id}
-            if vim_tenant!=None:
-                where_["vim_tenant_id"] = vim_tenant
-            if vim_tenant_name!=None:
-                where_["vim_tenant_name"] = vim_tenant_name
-            #check if vim_tenant_id is already at database
-            datacenter_tenants_dict = mydb.get_rows(FROM='datacenter_tenants', WHERE=where_)
-            if len(datacenter_tenants_dict)>=1:
-                datacenter_tenants_dict = datacenter_tenants_dict[0]
-                vim_tenant_id_exist_atdb=True
-                #TODO check if a field has changed and edit entry at datacenter_tenants at DB
-            else: #result=0
-                datacenter_tenants_dict = {}
-                #insert at table datacenter_tenants
-        else: #if vim_tenant==None:
-            #create tenant at VIM if not provided
-            try:
-                _, myvim = get_datacenter_by_name_uuid(mydb, None, datacenter, vim_user=vim_username,
-                                                                   vim_passwd=vim_password)
-                datacenter_name = myvim["name"]
-                vim_tenant = myvim.new_tenant(vim_tenant_name, "created by openmano for datacenter "+datacenter_name)
-            except vimconn.vimconnException as e:
-                raise NfvoException("Not possible to create vim_tenant {} at VIM: {}".format(vim_tenant_id, str(e)), httperrors.Internal_Server_Error)
-            datacenter_tenants_dict = {}
-            datacenter_tenants_dict["created"]="true"
-
-        #fill datacenter_tenants table
-        if not vim_tenant_id_exist_atdb:
-            datacenter_tenants_dict["vim_tenant_id"] = vim_tenant
-            datacenter_tenants_dict["vim_tenant_name"] = vim_tenant_name
-            datacenter_tenants_dict["user"] = vim_username
-            datacenter_tenants_dict["passwd"] = vim_password
-            datacenter_tenants_dict["datacenter_id"] = datacenter_id
-            if name:
-                datacenter_tenants_dict["name"] = name
-            else:
-                datacenter_tenants_dict["name"] = datacenter_name
-            if config:
-                datacenter_tenants_dict["config"] = yaml.safe_dump(config, default_flow_style=True, width=256)
-            id_ = mydb.new_row('datacenter_tenants', datacenter_tenants_dict, add_uuid=True, confidential_data=True)
-            datacenter_tenants_dict["uuid"] = id_
-
-        #fill tenants_datacenters table
-        datacenter_tenant_id = datacenter_tenants_dict["uuid"]
-        tenants_datacenter_dict["datacenter_tenant_id"] = datacenter_tenant_id
-        mydb.new_row('tenants_datacenters', tenants_datacenter_dict)
-
-        # create thread
-        thread_name = get_non_used_vim_name(datacenter_name, datacenter_id, tenant_dict['name'], tenant_dict['uuid'])
-        new_thread = vim_thread.vim_thread(task_lock, thread_name, datacenter_name, datacenter_tenant_id,
-                                           db=db, db_lock=db_lock, ovim=ovim)
-        new_thread.start()
-        thread_id = datacenter_tenants_dict["uuid"]
-        vim_threads["running"][thread_id] = new_thread
-        return thread_id
-    except vimconn.vimconnException as e:
-        raise NfvoException(str(e), httperrors.Bad_Request)
-
-
def edit_vim_account(mydb, nfvo_tenant, datacenter_tenant_id, datacenter_id=None, name=None, vim_tenant=None,
                              vim_tenant_name=None, vim_username=None, vim_password=None, config=None):
    """Modify an existing vim_account (a datacenter_tenants DB row) owned by a nfvo tenant.

    Params:
        mydb: database connector
        nfvo_tenant: uuid of the owning nfvo tenant, used to check access rights
        datacenter_tenant_id: uuid of the vim_account to edit (optional when datacenter_id selects it)
        datacenter_id: uuid of the datacenter, alternative way to locate the vim_account
        name, vim_tenant, vim_tenant_name, vim_username, vim_password, config: fields to update;
            only the truthy ones are changed. 'config' is merged over the stored config.
    Return: the uuid of the modified vim_account
    Raises: NfvoException if not found (Not_Found) or ambiguous (Conflict)
    """
    # get vim_account; check it is valid for this tenant
    from_ = "datacenter_tenants as dt JOIN tenants_datacenters as td ON dt.uuid=td.datacenter_tenant_id"
    where_ = {"td.nfvo_tenant_id": nfvo_tenant}
    if datacenter_tenant_id:
        where_["dt.uuid"] = datacenter_tenant_id
    if datacenter_id:
        where_["dt.datacenter_id"] = datacenter_id
    vim_accounts = mydb.get_rows(SELECT="dt.uuid as uuid, config", FROM=from_, WHERE=where_)
    if not vim_accounts:
        raise NfvoException("vim_account not found for this tenant", http_code=httperrors.Not_Found)
    elif len(vim_accounts) > 1:
        raise NfvoException("found more than one vim_account for this tenant", http_code=httperrors.Conflict)
    datacenter_tenant_id = vim_accounts[0]["uuid"]
    original_config = vim_accounts[0]["config"]

    update_ = {}
    if config:
        # merge the provided config over the stored one instead of replacing it
        # safe_load: the stored config was produced by yaml.safe_dump
        original_config_dict = yaml.safe_load(original_config)
        original_config_dict.update(config)
        # BUG FIX: this was written to the non-existent name 'update', raising NameError
        # whenever a config change was requested; it must accumulate into update_
        update_["config"] = yaml.safe_dump(original_config_dict, default_flow_style=True, width=256)
    if name:
        update_['name'] = name
    if vim_tenant:
        update_['vim_tenant_id'] = vim_tenant
    if vim_tenant_name:
        update_['vim_tenant_name'] = vim_tenant_name
    if vim_username:
        update_['user'] = vim_username
    if vim_password:
        update_['passwd'] = vim_password
    if update_:
        mydb.update_rows("datacenter_tenants", UPDATE=update_, WHERE={"uuid": datacenter_tenant_id})

    # make the running vim worker thread reload its credentials/config
    vim_threads["running"][datacenter_tenant_id].insert_task("reload")
    return datacenter_tenant_id
-
def delete_vim_account(mydb, tenant_id, vim_account_id, datacenter=None):
    """Detach a vim_account from a nfvo tenant and delete the associated datacenter_tenants rows.

    Params:
        mydb: database connector
        tenant_id: nfvo tenant uuid/name, or "any"/None to skip tenant filtering
        vim_account_id: uuid of the vim_account (datacenter_tenant) to detach
        datacenter: optional datacenter uuid/name; if provided it selects the association instead
    Return: a status string, possibly including a warning if the VIM tenant could not be deleted
    Raises: NfvoException (Not_Found) when the association does not exist
    """
    # get nfvo_tenant info
    if not tenant_id or tenant_id == "any":
        tenant_uuid = None
    else:
        tenant_dict = mydb.get_table_by_uuid_name('nfvo_tenants', tenant_id)
        tenant_uuid = tenant_dict['uuid']

    # BUG FIX: datacenter_id was referenced below (error message and final return) even when
    # no 'datacenter' argument was provided, raising NameError; initialize it explicitly
    datacenter_id = None

    # check that this association exists before deleting
    tenants_datacenter_dict = {}
    if datacenter:
        datacenter_id, _ = get_datacenter_uuid(mydb, tenant_uuid, datacenter)
        tenants_datacenter_dict["datacenter_id"] = datacenter_id
    elif vim_account_id:
        tenants_datacenter_dict["datacenter_tenant_id"] = vim_account_id
    if tenant_uuid:
        tenants_datacenter_dict["nfvo_tenant_id"] = tenant_uuid
    tenant_datacenter_list = mydb.get_rows(FROM='tenants_datacenters', WHERE=tenants_datacenter_dict)
    if len(tenant_datacenter_list) == 0 and tenant_uuid:
        raise NfvoException("datacenter '{}' and tenant '{}' are not attached".format(
            datacenter_id, tenant_dict['uuid']), httperrors.Not_Found)

    # delete this association
    mydb.delete_row(FROM='tenants_datacenters', WHERE=tenants_datacenter_dict)

    # get vim_tenant info and delete it
    warning = ''
    for tenant_datacenter_item in tenant_datacenter_list:
        vim_tenant_dict = mydb.get_table_by_uuid_name('datacenter_tenants',
                                                      tenant_datacenter_item['datacenter_tenant_id'])
        # try to delete vim:tenant
        try:
            mydb.delete_row_by_id('datacenter_tenants', tenant_datacenter_item['datacenter_tenant_id'])
            if vim_tenant_dict['created'] == 'true':
                # delete tenant at VIM only if it was created by NFVO
                try:
                    datacenter_id, myvim = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)
                    myvim.delete_tenant(vim_tenant_dict['vim_tenant_id'])
                except vimconn.vimconnException as e:
                    warning = "Not possible to delete vim_tenant_id {} from VIM: {} ".format(
                        vim_tenant_dict['vim_tenant_id'], str(e))
                    logger.warn(warning)
        except db_base_Exception as e:
            # best effort: the error is likely caused by DB dependencies; vim_tenant cannot be deleted yet
            logger.error("Cannot delete datacenter_tenants " + str(e))
        # stop and unregister the vim worker thread attached to this vim_account, if any
        thread_id = tenant_datacenter_item["datacenter_tenant_id"]
        thread = vim_threads["running"].get(thread_id)
        if thread:
            thread.insert_task("exit")
            vim_threads["deleting"][thread_id] = thread
    return "datacenter {} detached. {}".format(datacenter_id, warning)
-
-
def datacenter_action(mydb, tenant_id, datacenter, action_dict):
    """DEPRECATED. Perform a maintenance action over a datacenter.

    Supported keys in action_dict: 'check-connectivity', 'net-update', 'net-edit', 'net-delete'.
    Return value depends on the action; raises NfvoException on VIM errors or unknown actions.
    """
    # get datacenter info
    datacenter_id, myvim = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)

    if 'check-connectivity' in action_dict:
        try:
            myvim.check_vim_connectivity()
        except vimconn.vimconnException as e:
            raise NfvoException(str(e), e.http_code)
    elif 'net-update' in action_dict:
        try:
            nets = myvim.get_network_list(filter_dict={'shared': True, 'admin_state_up': True, 'status': 'ACTIVE'})
        except vimconn.vimconnException as e:
            raise NfvoException(str(e), httperrors.Internal_Server_Error)
        # update nets: change from VIM format to NFVO format
        net_list = []
        for net in nets:
            net_nfvo = {'datacenter_id': datacenter_id}
            net_nfvo['name'] = net['name']
            net_nfvo['vim_net_id'] = net['id']
            # change from ('ptp','data','bridge_data','bridge_man') to ('bridge','data','ptp')
            net_nfvo['type'] = net['type'][0:6]
            net_nfvo['shared'] = net['shared']
            net_nfvo['multipoint'] = False if net['type'] == 'ptp' else True
            net_list.append(net_nfvo)
        inserted, deleted = mydb.update_datacenter_nets(datacenter_id, net_list)
        logger.info("Inserted %d nets, deleted %d old nets", inserted, deleted)
        return inserted
    elif 'net-edit' in action_dict:
        net = action_dict['net-edit'].pop('net')
        what = 'vim_net_id' if utils.check_valid_uuid(net) else 'name'
        result = mydb.update_rows('datacenter_nets', action_dict['net-edit'],
                                  WHERE={'datacenter_id': datacenter_id, what: net})
        return result
    elif 'net-delete' in action_dict:
        # BUG FIX: the key was misspelled 'net-deelte', raising KeyError on every net-delete call
        net = action_dict['net-delete'].get('net')
        what = 'vim_net_id' if utils.check_valid_uuid(net) else 'name'
        result = mydb.delete_row(FROM='datacenter_nets',
                                 WHERE={'datacenter_id': datacenter_id, what: net})
        return result
    else:
        raise NfvoException("Unknown action " + str(action_dict), httperrors.Bad_Request)
-
-
def datacenter_edit_netmap(mydb, tenant_id, datacenter, netmap, action_dict):
    """Update a netmap entry (datacenter_nets row) with the fields in action_dict['netmap'].

    The netmap can be addressed either by its uuid or by its name.
    Return: the number of updated rows, as reported by the database connector.
    """
    # resolve the datacenter name/uuid to its uuid
    dc_uuid, _ = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)

    # decide whether the caller addressed the netmap by uuid or by name
    if utils.check_valid_uuid(netmap):
        key_field = 'uuid'
    else:
        key_field = 'name'
    return mydb.update_rows('datacenter_nets', action_dict['netmap'],
                            WHERE={'datacenter_id': dc_uuid, key_field: netmap})
-
-
def datacenter_new_netmap(mydb, tenant_id, datacenter, action_dict=None):
    """Create netmap entries (datacenter_nets rows) from the networks existing at the VIM.

    Params:
        mydb: database connector
        tenant_id: nfvo tenant uuid/name
        datacenter: datacenter uuid or name
        action_dict: optional; when provided, only the VIM network selected by
            action_dict['netmap']['vim_id'/'vim_name'] is mapped (optionally renamed
            with 'name'). When absent, all shared VIM networks are mapped.
    Return: the list of created netmap entries, each with a per-entry 'status'
    Raises: NfvoException on VIM errors, ambiguous or missing networks
    """
    # get datacenter info
    datacenter_id, myvim = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)
    filter_dict = {}
    if action_dict:
        action_dict = action_dict["netmap"]
        if 'vim_id' in action_dict:
            filter_dict["id"] = action_dict['vim_id']
        if 'vim_name' in action_dict:
            filter_dict["name"] = action_dict['vim_name']
    else:
        # bulk mode: map every shared network
        filter_dict["shared"] = True

    try:
        vim_nets = myvim.get_network_list(filter_dict=filter_dict)
    except vimconn.vimconnException as e:
        raise NfvoException(str(e), httperrors.Internal_Server_Error)
    if len(vim_nets) > 1 and action_dict:
        # BUG FIX: message said 'more than two networks'; the condition triggers with more than one
        raise NfvoException("more than one network found, specify with vim_id", httperrors.Conflict)
    elif len(vim_nets) == 0:  # and action_dict:
        raise NfvoException("Not found a network at VIM with " + str(filter_dict), httperrors.Not_Found)
    net_list = []
    for net in vim_nets:
        net_nfvo = {'datacenter_id': datacenter_id}
        if action_dict and "name" in action_dict:
            net_nfvo['name'] = action_dict['name']
        else:
            net_nfvo['name'] = net['name']
        net_nfvo['vim_net_id'] = net['id']
        # change from ('ptp','data','bridge_data','bridge_man') to ('bridge','data','ptp')
        net_nfvo['type'] = net['type'][0:6]
        net_nfvo['shared'] = net['shared']
        net_nfvo['multipoint'] = False if net['type'] == 'ptp' else True
        try:
            net_id = mydb.new_row("datacenter_nets", net_nfvo, add_uuid=True)
            net_nfvo["status"] = "OK"
            net_nfvo["uuid"] = net_id
        except db_base_Exception as e:
            if action_dict:
                # the user asked for one specific network: propagate the failure
                raise
            else:
                # bulk mode: report the failure per-network and continue with the rest
                net_nfvo["status"] = "FAIL: " + str(e)
        net_list.append(net_nfvo)
    return net_list
-
def get_sdn_net_id(mydb, tenant_id, datacenter, network_id):
    """Return the SDN network uuid associated to a VIM dataplane network, or None.

    network_id may be the VIM uuid or the network name. Returns None when the
    network is not of type 'data' or no SDN network is associated to it.
    Raises NfvoException when the VIM network cannot be resolved unambiguously
    or when several SDN networks are associated to it.
    """
    # obtain the network data from the VIM
    try:
        if utils.check_valid_uuid(network_id):
            vim_filter = {"id": network_id}
        else:
            vim_filter = {"name": network_id}

        datacenter_id, myvim = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)
        network = myvim.get_network_list(filter_dict=vim_filter)
    except vimconn.vimconnException as e:
        raise NfvoException("Not possible to get_sdn_net_id from VIM: {}".format(str(e)), e.http_code)

    # the network must exist and be unique at the VIM
    if len(network) == 0:
        raise NfvoException("Network {} is not present in the system".format(network_id),
                            httperrors.Bad_Request)
    if len(network) > 1:
        raise NfvoException("Multiple networks present in vim identified by {}".format(network_id),
                            httperrors.Bad_Request)

    # only dataplane networks can carry an associated SDN network
    if network[0]['type'] != 'data':
        return None

    # from now on use the VIM uuid, regardless of how the caller addressed the network
    network_id = network[0]['id']

    # search in instance_nets the sdn_net_id associated to this vim_net_id
    try:
        rows = mydb.get_rows(SELECT=('sdn_net_id',), FROM='instance_nets',
                             WHERE={'vim_net_id': network_id})
    except db_base_Exception as e:
        raise NfvoException("db_base_Exception obtaining SDN network to associated to vim network {}".format(
            network_id) + str(e), e.http_code)

    sdn_ids = [row['sdn_net_id'] for row in rows if row['sdn_net_id'] is not None]
    if not sdn_ids:
        return None
    if len(sdn_ids) == 1:
        return sdn_ids[0]
    raise NfvoException("More than one SDN network is associated to vim network {}".format(
        network_id), httperrors.Internal_Server_Error)
-
def get_sdn_controller_id(mydb, datacenter):
    """Return the uuid of the SDN controller associated to a datacenter, or None if not set.

    The association is stored under the 'sdn-controller' key inside the YAML 'config'
    column of the datacenters table.
    """
    config = mydb.get_rows(SELECT=('config',), FROM='datacenters', WHERE={'uuid': datacenter})[0].get('config', '{}')
    if not config:
        return None

    # safe_load instead of load: the config is operator-provided YAML; never allow
    # deserialization of arbitrary python objects (also required by PyYAML >= 5)
    return yaml.safe_load(config).get('sdn-controller')
-
def vim_net_sdn_attach(mydb, tenant_id, datacenter, network_id, descriptor):
    """Attach an external switch port (described by 'descriptor') to the SDN network
    associated with a VIM dataplane network.

    descriptor must contain 'port' and may contain 'vlan' and 'mac'.
    Return: a message with the uuid of the created port.
    Raises NfvoException when no SDN network/controller is associated or on ovim/DB errors.
    """
    try:
        sdn_network_id = get_sdn_net_id(mydb, tenant_id, datacenter, network_id)
        if not sdn_network_id:
            raise NfvoException("No SDN network is associated to vim-network {}".format(network_id),
                                httperrors.Internal_Server_Error)

        # obtain sdn controller id
        controller_id = get_sdn_controller_id(mydb, datacenter)
        if not controller_id:
            raise NfvoException("No SDN controller is set for datacenter {}".format(datacenter),
                                httperrors.Internal_Server_Error)

        # obtain sdn controller info
        sdn_controller = ovim.show_of_controller(controller_id)

        port_data = {
            'name': 'external_port',
            'net_id': sdn_network_id,
            'ofc_id': controller_id,
            'switch_dpid': sdn_controller['dpid'],
            'switch_port': descriptor['port'],
        }
        # optional attributes of the port
        for optional in ('vlan', 'mac'):
            if optional in descriptor:
                port_data[optional] = descriptor[optional]

        result = ovim.new_port(port_data)
    except ovimException as e:
        raise NfvoException("ovimException attaching SDN network {} to vim network {}".format(
            sdn_network_id, network_id) + str(e), httperrors.Internal_Server_Error)
    except db_base_Exception as e:
        raise NfvoException("db_base_Exception attaching SDN network to vim network {}".format(
            network_id) + str(e), e.http_code)

    return 'Port uuid: ' + result
-
def vim_net_sdn_detach(mydb, tenant_id, datacenter, network_id, port_id=None):
    """Detach ports from the SDN network associated with a VIM dataplane network.

    If port_id is given only that port is removed; otherwise every port named
    'external_port' of the associated SDN network is detached.
    Return: a message with the comma-separated uuids of the detached ports.
    """
    if port_id:
        port_filter = {'uuid': port_id}
    else:
        sdn_network_id = get_sdn_net_id(mydb, tenant_id, datacenter, network_id)
        if not sdn_network_id:
            raise NfvoException("No SDN network is associated to vim-network {}".format(network_id),
                                httperrors.Internal_Server_Error)
        # when no port_id is specified, only ports marked as 'external_port' are detached
        port_filter = {'name': 'external_port', 'net_id': sdn_network_id}

    try:
        port_list = ovim.get_ports(columns={'uuid'}, filter=port_filter)
    except ovimException as e:
        raise NfvoException("ovimException obtaining external ports for net {}. ".format(network_id) + str(e),
                            httperrors.Internal_Server_Error)

    if not port_list:
        raise NfvoException(
            "No ports attached to the network {} were found with the requested criteria".format(network_id),
            httperrors.Bad_Request)

    detached_uuids = []
    for port in port_list:
        try:
            detached_uuids.append(port['uuid'])
            ovim.delete_port(port['uuid'])
        except ovimException as e:
            raise NfvoException("ovimException deleting port {} for net {}. ".format(port['uuid'], network_id) +
                                str(e), httperrors.Internal_Server_Error)

    return 'Detached ports uuid: {}'.format(','.join(detached_uuids))
-
def vim_action_get(mydb, tenant_id, datacenter, item, name):
    """Get from the VIM the list of 'networks', 'tenants' or 'images', optionally filtered by name/uuid.

    Networks are decorated with their attached SDN ports when a SDN network is associated.
    Return: {item: [...]} for a list, or {singular_item: {...}} when 'name' selects exactly one.
    Raises: NfvoException on VIM/ovim errors or when 'name' matches nothing.
    """
    # get datacenter info
    datacenter_id, myvim = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)
    filter_dict = {}
    if name:
        if utils.check_valid_uuid(name):
            filter_dict["id"] = name
        else:
            filter_dict["name"] = name
    try:
        if item == "networks":
            # filter_dict['tenant_id'] = myvim['tenant_id']
            content = myvim.get_network_list(filter_dict=filter_dict)

            if len(content) == 0:
                raise NfvoException("Network {} is not present in the system. ".format(name),
                                    httperrors.Bad_Request)

            # update the networks with the attached SDN ports
            for net in content:
                sdn_network_id = get_sdn_net_id(mydb, tenant_id, datacenter, net['id'])
                if sdn_network_id is not None:
                    try:
                        port_list = ovim.get_ports(columns={'uuid', 'switch_port', 'vlan', 'name'},
                                                   filter={'net_id': sdn_network_id})
                    except ovimException as e:
                        # BUG FIX: 'network_id' did not exist in this scope; use net['id']
                        raise NfvoException("ovimException obtaining external ports for net {}. ".format(net['id']) +
                                            str(e), httperrors.Internal_Server_Error)
                    # remove field name; if the port name is external_port, expose it as 'type'
                    for port in port_list:
                        if port['name'] == 'external_port':
                            port['type'] = "External"
                        del port['name']
                    net['sdn_network_id'] = sdn_network_id
                    net['sdn_attached_ports'] = port_list

        elif item == "tenants":
            content = myvim.get_tenant_list(filter_dict=filter_dict)
        elif item == "images":
            content = myvim.get_image_list(filter_dict=filter_dict)
        else:
            raise NfvoException(item + "?", httperrors.Method_Not_Allowed)
        logger.debug("vim_action response %s", content)  # update nets; change from VIM format to NFVO format
        if name and len(content) == 1:
            return {item[:-1]: content[0]}
        elif name and len(content) == 0:
            # BUG FIX (py3): dict has no iteritems(); also the http_code slot wrongly carried 'datacenter'
            raise NfvoException("No {} found with ".format(item[:-1]) +
                                " and ".join("{}: {}".format(k, v) for k, v in filter_dict.items()),
                                httperrors.Not_Found)
        else:
            return {item: content}
    except vimconn.vimconnException as e:
        # BUG FIX (py3): the python2 'print' statement was a syntax error; use the module logger
        logger.error("vim_action Not possible to get_%s_list from VIM: %s ", item, str(e))
        raise NfvoException("Not possible to get_{}_list from VIM: {}".format(item, str(e)), e.http_code)
-
-
def vim_action_delete(mydb, tenant_id, datacenter, item, name):
    """Delete an item ('networks', 'tenants' or 'images') from the VIM, cleaning up any
    associated SDN network and its port attachments first.

    Return: a human readable confirmation string.
    Raises: NfvoException when the item is missing, ambiguous, or the VIM/ovim call fails.
    """
    # get datacenter info
    if tenant_id == "any":
        tenant_id = None

    datacenter_id, myvim = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)
    # resolve uuid and name of the item
    content = vim_action_get(mydb, tenant_id, datacenter, item, name)
    logger.debug("vim_action_delete vim response: " + str(content))
    # BUG FIX (py3): dict.values() is a view, not subscriptable; take the first value explicitly
    items = next(iter(content.values()))
    if type(items) == list and len(items) == 0:
        raise NfvoException("Not found " + item, httperrors.Not_Found)
    elif type(items) == list and len(items) > 1:
        raise NfvoException("Found more than one {} with this name. Use uuid.".format(item), httperrors.Not_Found)
    else:  # it is a dict
        item_id = items["id"]
        item_name = str(items.get("name"))

    try:
        if item == "networks":
            # if there is a SDN network associated to the vim-network, clear the relationship and delete it
            sdn_network_id = get_sdn_net_id(mydb, tenant_id, datacenter, item_id)
            if sdn_network_id is not None:
                # delete any port attachment to this network
                try:
                    port_list = ovim.get_ports(columns={'uuid'}, filter={'net_id': sdn_network_id})
                except ovimException as e:
                    # BUG FIX: 'network_id' was not defined in this scope; use item_id
                    raise NfvoException(
                        "ovimException obtaining external ports for net {}. ".format(item_id) + str(e),
                        httperrors.Internal_Server_Error)

                # by detaching all ports one by one we ensure that not only the external_ports get detached
                for port in port_list:
                    vim_net_sdn_detach(mydb, tenant_id, datacenter, item_id, port['uuid'])

                # delete from 'instance_nets' the correspondence between the vim-net-id and the sdn-net-id
                try:
                    mydb.delete_row(FROM='instance_nets', WHERE={'instance_scenario_id': None,
                                                                 'sdn_net_id': sdn_network_id,
                                                                 'vim_net_id': item_id})
                except db_base_Exception as e:
                    # BUG FIX: 'correspondence' was not defined in this scope; report the vim net id
                    raise NfvoException("Error deleting correspondence for VIM/SDN dataplane network {}: ".format(
                        item_id) + str(e), e.http_code)

                # delete the SDN network
                try:
                    ovim.delete_network(sdn_network_id)
                except ovimException as e:
                    logger.error("ovimException deleting SDN network={} ".format(sdn_network_id) + str(e),
                                 exc_info=True)
                    raise NfvoException("ovimException deleting SDN network={} ".format(sdn_network_id) + str(e),
                                        httperrors.Internal_Server_Error)

            content = myvim.delete_network(item_id)
        elif item == "tenants":
            content = myvim.delete_tenant(item_id)
        elif item == "images":
            content = myvim.delete_image(item_id)
        else:
            raise NfvoException(item + "?", httperrors.Method_Not_Allowed)
    except vimconn.vimconnException as e:
        raise NfvoException("Not possible to delete_{} {} from VIM: {}".format(item, name, str(e)), e.http_code)

    return "{} {} {} deleted".format(item[:-1], item_id, item_name)
-
-
def vim_action_create(mydb, tenant_id, datacenter, item, descriptor):
    """Create an item at the VIM and return it in the same format as vim_action_get().

    Params:
        mydb: database connector
        tenant_id: nfvo tenant uuid/name, or "any" for no tenant filtering
        datacenter: datacenter uuid or name where the item is created
        item: one of "networks" or "tenants"
        descriptor: dict with the item description under key "network" or "tenant"
    Return: the created item as resolved back through vim_action_get()
    Raises: NfvoException on VIM, ovim or database errors
    """
    #get datacenter info
    logger.debug("vim_action_create descriptor %s", str(descriptor))
    if tenant_id == "any":
        tenant_id=None
    datacenter_id, myvim  = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)
    try:
        if item=="networks":
            net = descriptor["network"]
            # pop() the known fields; only VIM-specific extras would remain in 'net'
            net_name = net.pop("name")
            net_type = net.pop("type", "bridge")
            net_public = net.pop("shared", False)
            net_ipprofile = net.pop("ip_profile", None)
            net_vlan = net.pop("vlan", None)
            content, _ = myvim.new_network(net_name, net_type, net_ipprofile, shared=net_public, vlan=net_vlan) #, **net)

            # If the datacenter has a SDN controller defined and the network is of dataplane type, then create the sdn network
            if get_sdn_controller_id(mydb, datacenter) != None and (net_type == 'data' or net_type == 'ptp'):
                # obtain datacenter_tenant_id
                datacenter_tenant_id = mydb.get_rows(SELECT=('uuid',),
                                                     FROM='datacenter_tenants',
                                                     WHERE={'datacenter_id': datacenter})[0]['uuid']
                try:
                    sdn_network = {}
                    sdn_network['vlan'] = net_vlan
                    sdn_network['type'] = net_type
                    sdn_network['name'] = net_name
                    sdn_network['region'] = datacenter_tenant_id
                    ovim_content  = ovim.new_network(sdn_network)
                except ovimException as e:
                    logger.error("ovimException creating SDN network={} ".format(
                        sdn_network) + str(e), exc_info=True)
                    raise NfvoException("ovimException creating SDN network={} ".format(sdn_network) + str(e),
                                        httperrors.Internal_Server_Error)

                # Save entry in database mano_db, table instance_nets, to establish a mapping vim_net_id <-> sdn_net_id
                # use instance_scenario_id=None to distinguish it from real instances of nets
                correspondence = {'instance_scenario_id': None,
                                  'sdn_net_id': ovim_content,
                                  'vim_net_id': content,
                                  'datacenter_tenant_id': datacenter_tenant_id
                                  }
                try:
                    mydb.new_row('instance_nets', correspondence, add_uuid=True)
                except db_base_Exception as e:
                    raise NfvoException("Error saving correspondence for VIM/SDN dataplane networks{}: {}".format(
                        correspondence, e), e.http_code)
        elif item=="tenants":
            tenant = descriptor["tenant"]
            content = myvim.new_tenant(tenant["name"], tenant.get("description"))
        else:
            # any other item kind is not supported by this endpoint
            raise NfvoException(item + "?", httperrors.Method_Not_Allowed)
    except vimconn.vimconnException as e:
        raise NfvoException("Not possible to create {} at VIM: {}".format(item, str(e)), e.http_code)

    # 'content' holds the VIM uuid of the created item; vim_action_get resolves it to a full description
    return vim_action_get(mydb, tenant_id, datacenter, item, content)
-
def sdn_controller_create(mydb, tenant_id, sdn_controller):
    """Register a new SDN controller in ovim and return its uuid."""
    controller_uuid = ovim.new_of_controller(sdn_controller)
    logger.debug('New SDN controller created with uuid {}'.format(controller_uuid))
    return controller_uuid
-
def sdn_controller_update(mydb, tenant_id, controller_id, sdn_controller):
    """Update an existing SDN controller in ovim; return a confirmation message."""
    updated = ovim.edit_of_controller(controller_id, sdn_controller)
    message = 'SDN controller {} updated'.format(updated)
    logger.debug(message)
    return message
-
def sdn_controller_list(mydb, tenant_id, controller_id=None):
    """List the SDN controllers registered in ovim.

    When controller_id is given, return that single controller's data instead.
    """
    if controller_id is None:
        data = ovim.get_of_controllers()
    else:
        data = ovim.show_of_controller(controller_id)

    logger.debug('SDN controller list:\n {}'.format(data))
    return data
-
def sdn_controller_delete(mydb, tenant_id, controller_id):
    """Delete a SDN controller from ovim, refusing if any datacenter still references it.

    Raises: NfvoException (Conflict) when a datacenter config points to this controller.
    """
    select_ = ('uuid', 'config')
    datacenters = mydb.get_rows(FROM='datacenters', SELECT=select_)
    for datacenter in datacenters:
        if datacenter['config']:
            # safe_load instead of load: never deserialize arbitrary python objects
            # from the stored config (also required by PyYAML >= 5)
            config = yaml.safe_load(datacenter['config'])
            if 'sdn-controller' in config and config['sdn-controller'] == controller_id:
                raise NfvoException("SDN controller {} is in use by datacenter {}".format(
                    controller_id, datacenter['uuid']), httperrors.Conflict)

    data = ovim.delete_of_controller(controller_id)
    msg = 'SDN controller {} deleted'.format(data)
    logger.debug(msg)
    return msg
-
def datacenter_sdn_port_mapping_set(mydb, tenant_id, datacenter_id, sdn_port_mapping):
    """Store the compute-node-pci <-> switch-port mapping of a datacenter in its SDN controller.

    sdn_port_mapping: list of {"compute_node": ..., "ports": [{"pci": ..., "switch_port": ...,
    "switch_mac": ...}, ...]}; a pci expression may pack several addresses using bracket ranges.
    Return: whatever ovim.set_of_port_mapping returns.
    Raises: NfvoException when the datacenter is missing, has no SDN controller, or a port
    entry lacks both switch_port and switch_mac.
    """
    controller = mydb.get_rows(FROM="datacenters", SELECT=("config",), WHERE={"uuid": datacenter_id})
    if len(controller) < 1:
        raise NfvoException("Datacenter {} not present in the database".format(datacenter_id), httperrors.Not_Found)

    try:
        # BUG FIX: a bare 'except:' also swallowed SystemExit/KeyboardInterrupt; catch only
        # the failures of a missing/empty/malformed config. safe_load: never deserialize
        # arbitrary python objects from the stored config.
        sdn_controller_id = yaml.safe_load(controller[0]["config"])["sdn-controller"]
    except (TypeError, KeyError, yaml.YAMLError):
        raise NfvoException("The datacenter {} has not an SDN controller associated".format(datacenter_id),
                            httperrors.Bad_Request)

    sdn_controller = ovim.show_of_controller(sdn_controller_id)
    switch_dpid = sdn_controller["dpid"]

    maps = []
    for compute_node in sdn_port_mapping:
        element = dict()
        element["compute_node"] = compute_node["compute_node"]
        for port in compute_node["ports"]:
            pci = port.get("pci")
            element["switch_port"] = port.get("switch_port")
            element["switch_mac"] = port.get("switch_mac")
            if not element["switch_port"] and not element["switch_mac"]:
                raise NfvoException("The mapping must contain 'switch_port' or 'switch_mac'", httperrors.Bad_Request)
            # a pci expression can expand to several pci addresses (bracket ranges)
            for pci_expanded in utils.expand_brackets(pci):
                element["pci"] = pci_expanded
                maps.append(dict(element))

    return ovim.set_of_port_mapping(maps, ofc_id=sdn_controller_id, switch_dpid=switch_dpid, region=datacenter_id)
-
def datacenter_sdn_port_mapping_list(mydb, tenant_id, datacenter_id):
    """Return the compute-node <-> switch-port mapping stored for a datacenter.

    Return: dict with keys "sdn-controller", "datacenter-id", "dpid" and "ports_mapping"
    (a list of {"compute_node": ..., "ports": [...]}).
    Raises: NfvoException when the datacenter has no SDN controller, no DPID, or the
    stored mappings are inconsistent.
    """
    maps = ovim.get_of_port_mappings(db_filter={"region": datacenter_id})

    result = {
        "sdn-controller": None,
        "datacenter-id": datacenter_id,
        "dpid": None,
        "ports_mapping": list()
    }

    datacenter = mydb.get_table_by_uuid_name('datacenters', datacenter_id)
    if datacenter['config']:
        # safe_load instead of load: never deserialize arbitrary python objects
        # from the stored config (also required by PyYAML >= 5)
        config = yaml.safe_load(datacenter['config'])
        if 'sdn-controller' in config:
            controller_id = config['sdn-controller']
            sdn_controller = sdn_controller_list(mydb, tenant_id, controller_id)
            result["sdn-controller"] = controller_id
            result["dpid"] = sdn_controller["dpid"]

    if result["sdn-controller"] is None:
        raise NfvoException("SDN controller is not defined for datacenter {}".format(datacenter_id),
                            httperrors.Bad_Request)
    if result["dpid"] is None:
        raise NfvoException("It was not possible to determine DPID for SDN controller {}".format(
            result["sdn-controller"]), httperrors.Internal_Server_Error)

    if len(maps) == 0:
        return result

    # group the flat mapping rows by compute node, checking every row is consistent
    # with the controller/dpid of the datacenter
    ports_correspondence_dict = dict()
    for link in maps:
        if result["sdn-controller"] != link["ofc_id"]:
            raise NfvoException("The sdn-controller specified for different port mappings differ",
                                httperrors.Internal_Server_Error)
        if result["dpid"] != link["switch_dpid"]:
            raise NfvoException("The dpid specified for different port mappings differ",
                                httperrors.Internal_Server_Error)
        element = dict()
        element["pci"] = link["pci"]
        if link["switch_port"]:
            element["switch_port"] = link["switch_port"]
        if link["switch_mac"]:
            element["switch_mac"] = link["switch_mac"]

        if link["compute_node"] not in ports_correspondence_dict:
            content = dict()
            content["compute_node"] = link["compute_node"]
            content["ports"] = list()
            ports_correspondence_dict[link["compute_node"]] = content

        ports_correspondence_dict[link["compute_node"]]["ports"].append(element)

    # return the compute nodes in a deterministic (sorted) order
    for key in sorted(ports_correspondence_dict):
        result["ports_mapping"].append(ports_correspondence_dict[key])

    return result
-
def datacenter_sdn_port_mapping_delete(mydb, tenant_id, datacenter_id):
    """Remove every SDN port mapping registered for the given datacenter."""
    db_filter = {"region": datacenter_id}
    return ovim.clear_of_port_mapping(db_filter=db_filter)
-
def create_RO_keypair(tenant_id):
    """
    Creates a public / private key pair for a RO tenant and returns their values
    Params:
        tenant_id: ID of the tenant; also used as the passphrase protecting the private key
    Return:
        public_key: Public key for the RO tenant (OpenSSH format)
        private_key: Encrypted (PKCS#8, passphrase = tenant_id) private key for RO tenant
    """

    bits = 2048
    key = RSA.generate(bits)
    try:
        public_key = key.publickey().exportKey('OpenSSH')
        # NOTE(review): exportKey() returns the key bytes or raises; this isinstance check
        # looks like dead code kept defensively — confirm against the crypto library version in use
        if isinstance(public_key, ValueError):
            raise NfvoException("Unable to create public key: {}".format(public_key), httperrors.Internal_Server_Error)
        # the tenant id doubles as the passphrase that encrypts the exported private key
        private_key = key.exportKey(passphrase=tenant_id, pkcs=8)
    except (ValueError, NameError) as e:
        raise NfvoException("Unable to create private key: {}".format(e), httperrors.Internal_Server_Error)
    return public_key, private_key
-
def decrypt_key (key, tenant_id):
    """
    Decrypts an encrypted RSA key
    Params:
        key: Private key to be decrypted
        tenant_id: ID of the tenant; the passphrase the key was encrypted with (see create_RO_keypair)
    Return:
        unencrypted_key: Unencrypted private key for RO tenant (PEM format)
    """
    try:
        key = RSA.importKey(key,tenant_id)
        unencrypted_key = key.exportKey('PEM')
        # NOTE(review): exportKey() returns bytes or raises; this isinstance check appears
        # to be dead code — confirm against the crypto library version in use
        if isinstance(unencrypted_key, ValueError):
            raise NfvoException("Unable to decrypt the private key: {}".format(unencrypted_key), httperrors.Internal_Server_Error)
    except ValueError as e:
        raise NfvoException("Unable to decrypt the private key: {}".format(e), httperrors.Internal_Server_Error)
    return unencrypted_key
diff --git a/osm_ro/nfvo_db.py b/osm_ro/nfvo_db.py
deleted file mode 100644 (file)
index eb72b13..0000000
+++ /dev/null
@@ -1,1175 +0,0 @@
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-"""
-NFVO DB engine. It implements all the methods to interact with the Openmano Database
-"""
-__author__="Alfonso Tierno, Gerardo Garcia, Pablo Montes"
-__date__ ="$28-aug-2014 10:05:01$"
-
-import db_base
-import MySQLdb as mdb
-import json
-import yaml
-import time
-#import sys, os
-
-from .db_base import retry, with_transaction
-from .http_tools import errors as httperrors
-from .utils import Attempt
-
-
# Single shared retry-policy descriptor consumed by the @retry decorator below.
_ATTEMPT = Attempt()


# Tables whose rows carry a "created_at" column; db_base consults this list
# (via the class attribute set in nfvo_db.__init__) to fill creation
# timestamps automatically on insert.
tables_with_createdat_field=["datacenters","instance_nets","instance_scenarios","instance_vms","instance_vnfs",
                           "interfaces","nets","nfvo_tenants","scenarios","sce_interfaces","sce_nets",
                           "sce_vnfs","tenants_datacenters","datacenter_tenants","vms","vnfs", "datacenter_nets",
                           "instance_actions", "sce_vnffgs", "sce_rsps", "sce_rsp_hops",
                           "sce_classifiers", "sce_classifier_matches", "instance_sfis", "instance_sfs",
                           "instance_classifications", "instance_sfps", "wims", "wim_accounts", "wim_nfvo_tenants",
                           "wim_port_mappings", "vim_wim_actions",
                           "instance_wim_nets"]
-
-
-class nfvo_db(db_base.db_base):
    def __init__(self, host=None, user=None, passwd=None, database=None,
                 log_name='openmano.db', log_level=None, lock=None):
        """Create the NFVO DB engine, delegating connection handling to db_base.

        Params mirror db_base.db_base: MySQL host, user, password and database
        name, plus a logger name/level and an optional lock for thread safety.
        """
        db_base.db_base.__init__(self, host, user, passwd, database,
                                 log_name, log_level, lock)
        # NOTE(review): this assigns a *class* attribute on db_base.db_base, so
        # it affects every db_base instance in the process, not only this object.
        db_base.db_base.tables_with_created_field=tables_with_createdat_field
        return
-
-    @retry
-    @with_transaction
-    def new_vnf_as_a_whole(self,nfvo_tenant,vnf_name,vnf_descriptor,VNFCDict):
-        self.logger.debug("Adding new vnf to the NFVO database")
-        created_time = time.time()
-        myVNFDict = {}
-        myVNFDict["name"] = vnf_name
-        myVNFDict["descriptor"] = vnf_descriptor['vnf'].get('descriptor')
-        myVNFDict["public"] = vnf_descriptor['vnf'].get('public', "false")
-        myVNFDict["description"] = vnf_descriptor['vnf']['description']
-        myVNFDict["class"] = vnf_descriptor['vnf'].get('class',"MISC")
-        myVNFDict["tenant_id"] = vnf_descriptor['vnf'].get("tenant_id")
-
-        vnf_id = self._new_row_internal('vnfs', myVNFDict, add_uuid=True, root_uuid=None, created_time=created_time)
-        #print "Adding new vms to the NFVO database"
-        #For each vm, we must create the appropriate vm in the NFVO database.
-        vmDict = {}
-        for _,vm in VNFCDict.iteritems():
-            #This code could make the name of the vms grow and grow.
-            #If we agree to follow this convention, we should check with a regex that the vnfc name is not including yet the vnf name
-            #vm['name'] = "%s-%s" % (vnf_name,vm['name'])
-            #print "VM name: %s. Description: %s" % (vm['name'], vm['description'])
-            vm["vnf_id"] = vnf_id
-            created_time += 0.00001
-            vm_id = self._new_row_internal('vms', vm, add_uuid=True, root_uuid=vnf_id, created_time=created_time)
-            #print "Internal vm id in NFVO DB: %s" % vm_id
-            vmDict[vm['name']] = vm_id
-
-        #Collect the bridge interfaces of each VM/VNFC under the 'bridge-ifaces' field
-        bridgeInterfacesDict = {}
-        for vm in vnf_descriptor['vnf']['VNFC']:
-            if 'bridge-ifaces' in  vm:
-                bridgeInterfacesDict[vm['name']] = {}
-                for bridgeiface in vm['bridge-ifaces']:
-                    created_time += 0.00001
-                    if 'port-security' in bridgeiface:
-                        bridgeiface['port_security'] = bridgeiface.pop('port-security')
-                    if 'floating-ip' in bridgeiface:
-                        bridgeiface['floating_ip'] = bridgeiface.pop('floating-ip')
-                    db_base._convert_bandwidth(bridgeiface, logger=self.logger)
-                    bridgeInterfacesDict[vm['name']][bridgeiface['name']] = {}
-                    bridgeInterfacesDict[vm['name']][bridgeiface['name']]['vpci'] = bridgeiface.get('vpci',None)
-                    bridgeInterfacesDict[vm['name']][bridgeiface['name']]['mac'] = bridgeiface.get('mac_address',None)
-                    bridgeInterfacesDict[vm['name']][bridgeiface['name']]['bw'] = bridgeiface.get('bandwidth', None)
-                    bridgeInterfacesDict[vm['name']][bridgeiface['name']]['model'] = bridgeiface.get('model', None)
-                    bridgeInterfacesDict[vm['name']][bridgeiface['name']]['port_security'] = \
-                        int(bridgeiface.get('port_security', True))
-                    bridgeInterfacesDict[vm['name']][bridgeiface['name']]['floating_ip'] = \
-                        int(bridgeiface.get('floating_ip', False))
-                    bridgeInterfacesDict[vm['name']][bridgeiface['name']]['created_time'] = created_time
-
-        # Collect the data interfaces of each VM/VNFC under the 'numas' field
-        dataifacesDict = {}
-        for vm in vnf_descriptor['vnf']['VNFC']:
-            dataifacesDict[vm['name']] = {}
-            for numa in vm.get('numas', []):
-                for dataiface in numa.get('interfaces', []):
-                    created_time += 0.00001
-                    db_base._convert_bandwidth(dataiface, logger=self.logger)
-                    dataifacesDict[vm['name']][dataiface['name']] = {}
-                    dataifacesDict[vm['name']][dataiface['name']]['vpci'] = dataiface.get('vpci')
-                    dataifacesDict[vm['name']][dataiface['name']]['bw'] = dataiface['bandwidth']
-                    dataifacesDict[vm['name']][dataiface['name']]['model'] = "PF" if dataiface[
-                                                                                            'dedicated'] == "yes" else (
-                    "VF" if dataiface['dedicated'] == "no" else "VFnotShared")
-                    dataifacesDict[vm['name']][dataiface['name']]['created_time'] = created_time
-
-        #For each internal connection, we add it to the interfaceDict and we  create the appropriate net in the NFVO database.
-        #print "Adding new nets (VNF internal nets) to the NFVO database (if any)"
-        internalconnList = []
-        if 'internal-connections' in vnf_descriptor['vnf']:
-            for net in vnf_descriptor['vnf']['internal-connections']:
-                #print "Net name: %s. Description: %s" % (net['name'], net['description'])
-
-                myNetDict = {}
-                myNetDict["name"] = net['name']
-                myNetDict["description"] = net['description']
-                myNetDict["type"] = net['type']
-                myNetDict["vnf_id"] = vnf_id
-
-                created_time += 0.00001
-                net_id = self._new_row_internal('nets', myNetDict, add_uuid=True, root_uuid=vnf_id, created_time=created_time)
-
-                for element in net['elements']:
-                    ifaceItem = {}
-                    #ifaceItem["internal_name"] = "%s-%s-%s" % (net['name'],element['VNFC'], element['local_iface_name'])
-                    ifaceItem["internal_name"] = element['local_iface_name']
-                    #ifaceItem["vm_id"] = vmDict["%s-%s" % (vnf_name,element['VNFC'])]
-                    ifaceItem["vm_id"] = vmDict[element['VNFC']]
-                    ifaceItem["net_id"] = net_id
-                    ifaceItem["type"] = net['type']
-                    if ifaceItem ["type"] == "data":
-                        dataiface = dataifacesDict[ element['VNFC'] ][ element['local_iface_name'] ]
-                        ifaceItem["vpci"] =  dataiface['vpci']
-                        ifaceItem["bw"] =    dataiface['bw']
-                        ifaceItem["model"] = dataiface['model']
-                        created_time_iface = dataiface['created_time']
-                    else:
-                        bridgeiface =  bridgeInterfacesDict[ element['VNFC'] ][ element['local_iface_name'] ]
-                        ifaceItem["vpci"]          = bridgeiface['vpci']
-                        ifaceItem["mac"]           = bridgeiface['mac']
-                        ifaceItem["bw"]            = bridgeiface['bw']
-                        ifaceItem["model"]         = bridgeiface['model']
-                        ifaceItem["port_security"] = bridgeiface['port_security']
-                        ifaceItem["floating_ip"]   = bridgeiface['floating_ip']
-                        created_time_iface = bridgeiface['created_time']
-                    internalconnList.append(ifaceItem)
-                #print "Internal net id in NFVO DB: %s" % net_id
-
-        #print "Adding internal interfaces to the NFVO database (if any)"
-        for iface in internalconnList:
-            #print "Iface name: %s" % iface['internal_name']
-            iface_id = self._new_row_internal('interfaces', iface, add_uuid=True, root_uuid=vnf_id, created_time = created_time_iface)
-            #print "Iface id in NFVO DB: %s" % iface_id
-
-        #print "Adding external interfaces to the NFVO database"
-        for iface in vnf_descriptor['vnf']['external-connections']:
-            myIfaceDict = {}
-            #myIfaceDict["internal_name"] = "%s-%s-%s" % (vnf_name,iface['VNFC'], iface['local_iface_name'])
-            myIfaceDict["internal_name"] = iface['local_iface_name']
-            #myIfaceDict["vm_id"] = vmDict["%s-%s" % (vnf_name,iface['VNFC'])]
-            myIfaceDict["vm_id"] = vmDict[iface['VNFC']]
-            myIfaceDict["external_name"] = iface['name']
-            myIfaceDict["type"] = iface['type']
-            if iface["type"] == "data":
-                dataiface = dataifacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]
-                myIfaceDict["vpci"]         = dataiface['vpci']
-                myIfaceDict["bw"]           = dataiface['bw']
-                myIfaceDict["model"]        = dataiface['model']
-                created_time_iface = dataiface['created_time']
-            else:
-                bridgeiface = bridgeInterfacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]
-                myIfaceDict["vpci"]         = bridgeiface['vpci']
-                myIfaceDict["bw"]           = bridgeiface['bw']
-                myIfaceDict["model"]        = bridgeiface['model']
-                myIfaceDict["mac"]          = bridgeiface['mac']
-                myIfaceDict["port_security"]= bridgeiface['port_security']
-                myIfaceDict["floating_ip"]  = bridgeiface['floating_ip']
-                created_time_iface = bridgeiface['created_time']
-            #print "Iface name: %s" % iface['name']
-            iface_id = self._new_row_internal('interfaces', myIfaceDict, add_uuid=True, root_uuid=vnf_id, created_time = created_time_iface)
-            #print "Iface id in NFVO DB: %s" % iface_id
-
-        return vnf_id
-
-    @retry
-    @with_transaction
-    def new_vnf_as_a_whole2(self,nfvo_tenant,vnf_name,vnf_descriptor,VNFCDict):
-        self.logger.debug("Adding new vnf to the NFVO database")
-        created_time = time.time()
-        myVNFDict = {}
-        myVNFDict["name"] = vnf_name
-        myVNFDict["descriptor"] = vnf_descriptor['vnf'].get('descriptor')
-        myVNFDict["public"] = vnf_descriptor['vnf'].get('public', "false")
-        myVNFDict["description"] = vnf_descriptor['vnf']['description']
-        myVNFDict["class"] = vnf_descriptor['vnf'].get('class',"MISC")
-        myVNFDict["tenant_id"] = vnf_descriptor['vnf'].get("tenant_id")
-
-        vnf_id = self._new_row_internal('vnfs', myVNFDict, add_uuid=True, root_uuid=None, created_time=created_time)
-        #print "Adding new vms to the NFVO database"
-        #For each vm, we must create the appropriate vm in the NFVO database.
-        vmDict = {}
-        for _,vm in VNFCDict.iteritems():
-            #This code could make the name of the vms grow and grow.
-            #If we agree to follow this convention, we should check with a regex that the vnfc name is not including yet the vnf name
-            #vm['name'] = "%s-%s" % (vnf_name,vm['name'])
-            #print "VM name: %s. Description: %s" % (vm['name'], vm['description'])
-            vm["vnf_id"] = vnf_id
-            created_time += 0.00001
-            vm_id = self._new_row_internal('vms', vm, add_uuid=True, root_uuid=vnf_id, created_time=created_time)
-            #print "Internal vm id in NFVO DB: %s" % vm_id
-            vmDict[vm['name']] = vm_id
-
-        #Collect the bridge interfaces of each VM/VNFC under the 'bridge-ifaces' field
-        bridgeInterfacesDict = {}
-        for vm in vnf_descriptor['vnf']['VNFC']:
-            if 'bridge-ifaces' in  vm:
-                bridgeInterfacesDict[vm['name']] = {}
-                for bridgeiface in vm['bridge-ifaces']:
-                    created_time += 0.00001
-                    db_base._convert_bandwidth(bridgeiface, logger=self.logger)
-                    if 'port-security' in bridgeiface:
-                        bridgeiface['port_security'] = bridgeiface.pop('port-security')
-                    if 'floating-ip' in bridgeiface:
-                        bridgeiface['floating_ip'] = bridgeiface.pop('floating-ip')
-                    ifaceDict = {}
-                    ifaceDict['vpci'] = bridgeiface.get('vpci',None)
-                    ifaceDict['mac'] = bridgeiface.get('mac_address',None)
-                    ifaceDict['bw'] = bridgeiface.get('bandwidth', None)
-                    ifaceDict['model'] = bridgeiface.get('model', None)
-                    ifaceDict['port_security'] = int(bridgeiface.get('port_security', True))
-                    ifaceDict['floating_ip'] = int(bridgeiface.get('floating_ip', False))
-                    ifaceDict['created_time'] = created_time
-                    bridgeInterfacesDict[vm['name']][bridgeiface['name']] = ifaceDict
-
-        # Collect the data interfaces of each VM/VNFC under the 'numas' field
-        dataifacesDict = {}
-        for vm in vnf_descriptor['vnf']['VNFC']:
-            dataifacesDict[vm['name']] = {}
-            for numa in vm.get('numas', []):
-                for dataiface in numa.get('interfaces', []):
-                    created_time += 0.00001
-                    db_base._convert_bandwidth(dataiface, logger=self.logger)
-                    ifaceDict = {}
-                    ifaceDict['vpci'] = dataiface.get('vpci')
-                    ifaceDict['bw'] = dataiface['bandwidth']
-                    ifaceDict['model'] = "PF" if dataiface['dedicated'] == "yes" else \
-                        ("VF" if dataiface['dedicated'] == "no" else "VFnotShared")
-                    ifaceDict['created_time'] = created_time
-                    dataifacesDict[vm['name']][dataiface['name']] = ifaceDict
-
-        #For each internal connection, we add it to the interfaceDict and we  create the appropriate net in the NFVO database.
-        #print "Adding new nets (VNF internal nets) to the NFVO database (if any)"
-        if 'internal-connections' in vnf_descriptor['vnf']:
-            for net in vnf_descriptor['vnf']['internal-connections']:
-                #print "Net name: %s. Description: %s" % (net['name'], net['description'])
-
-                myNetDict = {}
-                myNetDict["name"] = net['name']
-                myNetDict["description"] = net['description']
-                if (net["implementation"] == "overlay"):
-                    net["type"] = "bridge"
-                    #It should give an error if the type is e-line. For the moment, we consider it as a bridge
-                elif (net["implementation"] == "underlay"):
-                    if (net["type"] == "e-line"):
-                        net["type"] = "ptp"
-                    elif (net["type"] == "e-lan"):
-                        net["type"] = "data"
-                net.pop("implementation")
-                myNetDict["type"] = net['type']
-                myNetDict["vnf_id"] = vnf_id
-
-                created_time += 0.00001
-                net_id = self._new_row_internal('nets', myNetDict, add_uuid=True, root_uuid=vnf_id, created_time=created_time)
-
-                if "ip-profile" in net:
-                    ip_profile = net["ip-profile"]
-                    myIPProfileDict = {}
-                    myIPProfileDict["net_id"] = net_id
-                    myIPProfileDict["ip_version"] = ip_profile.get('ip-version',"IPv4")
-                    myIPProfileDict["subnet_address"] = ip_profile.get('subnet-address',None)
-                    myIPProfileDict["gateway_address"] = ip_profile.get('gateway-address',None)
-                    myIPProfileDict["dns_address"] = ip_profile.get('dns-address',None)
-                    if ("dhcp" in ip_profile):
-                        myIPProfileDict["dhcp_enabled"] = ip_profile["dhcp"].get('enabled',"true")
-                        myIPProfileDict["dhcp_start_address"] = ip_profile["dhcp"].get('start-address',None)
-                        myIPProfileDict["dhcp_count"] = ip_profile["dhcp"].get('count',None)
-
-                    created_time += 0.00001
-                    ip_profile_id = self._new_row_internal('ip_profiles', myIPProfileDict)
-
-                for element in net['elements']:
-                    ifaceItem = {}
-                    #ifaceItem["internal_name"] = "%s-%s-%s" % (net['name'],element['VNFC'], element['local_iface_name'])
-                    ifaceItem["internal_name"] = element['local_iface_name']
-                    #ifaceItem["vm_id"] = vmDict["%s-%s" % (vnf_name,element['VNFC'])]
-                    ifaceItem["vm_id"] = vmDict[element['VNFC']]
-                    ifaceItem["net_id"] = net_id
-                    ifaceItem["type"] = net['type']
-                    ifaceItem["ip_address"] = element.get('ip_address',None)
-                    if ifaceItem ["type"] == "data":
-                        ifaceDict = dataifacesDict[ element['VNFC'] ][ element['local_iface_name'] ]
-                        ifaceItem["vpci"] =  ifaceDict['vpci']
-                        ifaceItem["bw"] =    ifaceDict['bw']
-                        ifaceItem["model"] = ifaceDict['model']
-                    else:
-                        ifaceDict = bridgeInterfacesDict[ element['VNFC'] ][ element['local_iface_name'] ]
-                        ifaceItem["vpci"] =  ifaceDict['vpci']
-                        ifaceItem["mac"] =  ifaceDict['mac']
-                        ifaceItem["bw"] =    ifaceDict['bw']
-                        ifaceItem["model"] = ifaceDict['model']
-                        ifaceItem["port_security"] = ifaceDict['port_security']
-                        ifaceItem["floating_ip"] = ifaceDict['floating_ip']
-                    created_time_iface = ifaceDict["created_time"]
-                    #print "Iface name: %s" % iface['internal_name']
-                    iface_id = self._new_row_internal('interfaces', ifaceItem, add_uuid=True, root_uuid=vnf_id, created_time=created_time_iface)
-                    #print "Iface id in NFVO DB: %s" % iface_id
-
-        #print "Adding external interfaces to the NFVO database"
-        for iface in vnf_descriptor['vnf']['external-connections']:
-            myIfaceDict = {}
-            #myIfaceDict["internal_name"] = "%s-%s-%s" % (vnf_name,iface['VNFC'], iface['local_iface_name'])
-            myIfaceDict["internal_name"] = iface['local_iface_name']
-            #myIfaceDict["vm_id"] = vmDict["%s-%s" % (vnf_name,iface['VNFC'])]
-            myIfaceDict["vm_id"] = vmDict[iface['VNFC']]
-            myIfaceDict["external_name"] = iface['name']
-            myIfaceDict["type"] = iface['type']
-            if iface["type"] == "data":
-                myIfaceDict["vpci"]  = dataifacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['vpci']
-                myIfaceDict["bw"]    = dataifacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['bw']
-                myIfaceDict["model"] = dataifacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['model']
-                created_time_iface = dataifacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['created_time']
-            else:
-                myIfaceDict["vpci"]  = bridgeInterfacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['vpci']
-                myIfaceDict["bw"]    = bridgeInterfacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['bw']
-                myIfaceDict["model"] = bridgeInterfacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['model']
-                myIfaceDict["mac"] = bridgeInterfacesDict[ iface['VNFC'] ][ iface['local_iface_name'] ]['mac']
-                myIfaceDict["port_security"] = \
-                    bridgeInterfacesDict[iface['VNFC']][iface['local_iface_name']]['port_security']
-                myIfaceDict["floating_ip"] = \
-                    bridgeInterfacesDict[iface['VNFC']][iface['local_iface_name']]['floating_ip']
-                created_time_iface = bridgeInterfacesDict[iface['VNFC']][iface['local_iface_name']]['created_time']
-            #print "Iface name: %s" % iface['name']
-            iface_id = self._new_row_internal('interfaces', myIfaceDict, add_uuid=True, root_uuid=vnf_id, created_time=created_time_iface)
-            #print "Iface id in NFVO DB: %s" % iface_id
-
-        return vnf_id
-
-#             except KeyError as e2:
-#                 exc_type, exc_obj, exc_tb = sys.exc_info()
-#                 fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
-#                 self.logger.debug("Exception type: %s; Filename: %s; Line number: %s", exc_type, fname, exc_tb.tb_lineno)
-#                 raise KeyError
-
    @retry
    @with_transaction
    def new_scenario(self, scenario_dict):
        """Insert a new scenario: the 'scenarios' row plus its sce_nets (with
        optional ip_profiles) and sce_vnfs (with their sce_interfaces).

        Params:
            scenario_dict: parsed scenario descriptor with 'name',
                'description', optional 'tenant_id'/'public', and the 'nets'
                and 'vnfs' dicts to instantiate. Mutated in place: each net
                gets a 'uuid' key and each vnf a 'scn_vnf_uuid' key.
        Return:
            uuid of the created 'scenarios' row
        """
        created_time = time.time()
        tenant_id = scenario_dict.get('tenant_id')
        # scenario row
        INSERT_={'tenant_id': tenant_id,
                    'name': scenario_dict['name'],
                    'description': scenario_dict['description'],
                    'public': scenario_dict.get('public', "false")}

        scenario_uuid =  self._new_row_internal('scenarios', INSERT_, add_uuid=True, root_uuid=None, created_time=created_time)
        # sce_nets: one row per net; created_time is bumped slightly per row
        # so insertion order is preserved
        for net in scenario_dict['nets'].values():
            net_dict={'scenario_id': scenario_uuid}
            net_dict["name"] = net["name"]
            net_dict["type"] = net["type"]
            net_dict["description"] = net.get("description")
            net_dict["external"] = net.get("external", False)
            if "graph" in net:
                #net["graph"]=yaml.safe_dump(net["graph"],default_flow_style=True,width=256)
                #TODO, must be json because of the GUI, change to yaml
                net_dict["graph"]=json.dumps(net["graph"])
            created_time += 0.00001
            net_uuid =  self._new_row_internal('sce_nets', net_dict, add_uuid=True, root_uuid=scenario_uuid, created_time=created_time)
            net['uuid']=net_uuid

            # optional per-net IP profile
            if net.get("ip-profile"):
                ip_profile = net["ip-profile"]
                myIPProfileDict = {
                    "sce_net_id": net_uuid,
                    "ip_version": ip_profile.get('ip-version', "IPv4"),
                    "subnet_address": ip_profile.get('subnet-address'),
                    "gateway_address": ip_profile.get('gateway-address'),
                    "dns_address": ip_profile.get('dns-address')}
                if "dhcp" in ip_profile:
                    myIPProfileDict["dhcp_enabled"] = ip_profile["dhcp"].get('enabled', "true")
                    myIPProfileDict["dhcp_start_address"] = ip_profile["dhcp"].get('start-address')
                    myIPProfileDict["dhcp_count"] = ip_profile["dhcp"].get('count')
                self._new_row_internal('ip_profiles', myIPProfileDict)

        # sce_vnfs
        for k, vnf in scenario_dict['vnfs'].items():
            INSERT_ = {'scenario_id': scenario_uuid,
                        'name': k,
                        'vnf_id': vnf['uuid'],
                        # 'description': scenario_dict['name']
                        'description': vnf['description']}
            if "graph" in vnf:
                #I NSERT_["graph"]=yaml.safe_dump(vnf["graph"],default_flow_style=True,width=256)
                # TODO, must be json because of the GUI, change to yaml
                INSERT_["graph"] = json.dumps(vnf["graph"])
            created_time += 0.00001
            scn_vnf_uuid = self._new_row_internal('sce_vnfs', INSERT_, add_uuid=True,
                                                    root_uuid=scenario_uuid, created_time=created_time)
            vnf['scn_vnf_uuid']=scn_vnf_uuid
            # sce_interfaces: only interfaces attached to a net ('net_key')
            for iface in vnf['ifaces'].values():
                if 'net_key' not in iface:
                    continue
                iface['net_id'] = scenario_dict['nets'][ iface['net_key'] ]['uuid']
                INSERT_={'sce_vnf_id': scn_vnf_uuid,
                            'sce_net_id': iface['net_id'],
                            'interface_id':  iface['uuid'],
                            'ip_address': iface.get('ip_address')}
                created_time += 0.00001
                iface_uuid = self._new_row_internal('sce_interfaces', INSERT_, add_uuid=True,
                                                        root_uuid=scenario_uuid, created_time=created_time)

        return scenario_uuid
-
-    @retry
-    @with_transaction
-    def edit_scenario(self, scenario_dict):
-        modified_time = time.time()
-        item_changed=0
-        #check that scenario exist
-        tenant_id = scenario_dict.get('tenant_id')
-        scenario_uuid = scenario_dict['uuid']
-
-        where_text = "uuid='{}'".format(scenario_uuid)
-        if not tenant_id and tenant_id != "any":
-            where_text += " AND (tenant_id='{}' OR public='True')".format(tenant_id)
-        cmd = "SELECT * FROM scenarios WHERE "+ where_text
-        self.logger.debug(cmd)
-        self.cur.execute(cmd)
-        self.cur.fetchall()
-        if self.cur.rowcount==0:
-            raise db_base.db_base_Exception("No scenario found with this criteria " + where_text, httperrors.Bad_Request)
-        elif self.cur.rowcount>1:
-            raise db_base.db_base_Exception("More than one scenario found with this criteria " + where_text, httperrors.Bad_Request)
-
-        #scenario
-        nodes = {}
-        topology = scenario_dict.pop("topology", None)
-        if topology != None and "nodes" in topology:
-            nodes = topology.get("nodes",{})
-        UPDATE_ = {}
-        if "name" in scenario_dict:        UPDATE_["name"] = scenario_dict["name"]
-        if "description" in scenario_dict: UPDATE_["description"] = scenario_dict["description"]
-        if len(UPDATE_)>0:
-            WHERE_={'tenant_id': tenant_id, 'uuid': scenario_uuid}
-            item_changed += self._update_rows('scenarios', UPDATE_, WHERE_, modified_time=modified_time)
-        #sce_nets
-        for node_id, node in nodes.items():
-            if "graph" in node:
-                #node["graph"] = yaml.safe_dump(node["graph"],default_flow_style=True,width=256)
-                #TODO, must be json because of the GUI, change to yaml
-                node["graph"] = json.dumps(node["graph"])
-            WHERE_={'scenario_id': scenario_uuid, 'uuid': node_id}
-            #Try to change at sce_nets(version 0 API backward compatibility and sce_vnfs)
-            item_changed += self._update_rows('sce_nets', node, WHERE_)
-            item_changed += self._update_rows('sce_vnfs', node, WHERE_, modified_time=modified_time)
-        return item_changed
-
-#     def get_instance_scenario(self, instance_scenario_id, tenant_id=None):
-#         '''Obtain the scenario instance information, filtering by one or several of the tenant, uuid or name
-#         instance_scenario_id is the uuid or the name if it is not a valid uuid format
-#         Only one scenario isntance must mutch the filtering or an error is returned
-#         '''
-#         print "1******************************************************************"
-#         try:
-#             with self.transaction(mdb.cursors.DictCursor):
-#                 #scenario table
-#                 where_list=[]
-#                 if tenant_id is not None: where_list.append( "tenant_id='" + tenant_id +"'" )
-#                 if db_base._check_valid_uuid(instance_scenario_id):
-#                     where_list.append( "uuid='" + instance_scenario_id +"'" )
-#                 else:
-#                     where_list.append( "name='" + instance_scenario_id +"'" )
-#                 where_text = " AND ".join(where_list)
-#                 self.cur.execute("SELECT * FROM instance_scenarios WHERE "+ where_text)
-#                 rows = self.cur.fetchall()
-#                 if self.cur.rowcount==0:
-#                     return -httperrors.Bad_Request, "No scenario instance found with this criteria " + where_text
-#                 elif self.cur.rowcount>1:
-#                     return -httperrors.Bad_Request, "More than one scenario instance found with this criteria " + where_text
-#                 instance_scenario_dict = rows[0]
-#
-#                 #instance_vnfs
-#                 self.cur.execute("SELECT uuid,vnf_id FROM instance_vnfs WHERE instance_scenario_id='"+ instance_scenario_dict['uuid'] + "'")
-#                 instance_scenario_dict['instance_vnfs'] = self.cur.fetchall()
-#                 for vnf in instance_scenario_dict['instance_vnfs']:
-#                     #instance_vms
-#                     self.cur.execute("SELECT uuid, vim_vm_id "+
-#                                 "FROM instance_vms  "+
-#                                 "WHERE instance_vnf_id='" + vnf['uuid'] +"'"
-#                                 )
-#                     vnf['instance_vms'] = self.cur.fetchall()
-#                 #instance_nets
-#                 self.cur.execute("SELECT uuid, vim_net_id FROM instance_nets WHERE instance_scenario_id='"+ instance_scenario_dict['uuid'] + "'")
-#                 instance_scenario_dict['instance_nets'] = self.cur.fetchall()
-#
-#                 #instance_interfaces
-#                 self.cur.execute("SELECT uuid, vim_interface_id, instance_vm_id, instance_net_id FROM instance_interfaces WHERE instance_scenario_id='"+ instance_scenario_dict['uuid'] + "'")
-#                 instance_scenario_dict['instance_interfaces'] = self.cur.fetchall()
-#
-#                 db_base._convert_datetime2str(instance_scenario_dict)
-#                 db_base._convert_str2boolean(instance_scenario_dict, ('public','shared','external') )
-#                 print "2******************************************************************"
-#                 return 1, instance_scenario_dict
-#         except (mdb.Error, AttributeError) as e:
-#             print "nfvo_db.get_instance_scenario DB Exception %d: %s" % (e.args[0], e.args[1])
-#             return self._format_error(e)
-
-    @retry
-    @with_transaction(cursor='dict')
-    def get_scenario(self, scenario_id, tenant_id=None, datacenter_vim_id=None, datacenter_id=None):
-        '''Obtain the scenario information, filtering by one or several of the tenant, uuid or name
-        scenario_id is the uuid or the name if it is not a valid uuid format
-        if datacenter_vim_id,d datacenter_id is provided, it supply aditional vim_id fields with the matching vim uuid
-        Only one scenario must mutch the filtering or an error is returned
-        '''
-        where_text = "uuid='{}'".format(scenario_id)
-        if not tenant_id and tenant_id != "any":
-            where_text += " AND (tenant_id='{}' OR public='True')".format(tenant_id)
-        cmd = "SELECT * FROM scenarios WHERE " + where_text
-        self.logger.debug(cmd)
-        self.cur.execute(cmd)
-        rows = self.cur.fetchall()
-        if self.cur.rowcount==0:
-            raise db_base.db_base_Exception("No scenario found with this criteria " + where_text, httperrors.Bad_Request)
-        elif self.cur.rowcount>1:
-            raise db_base.db_base_Exception("More than one scenario found with this criteria " + where_text, httperrors.Bad_Request)
-        scenario_dict = rows[0]
-        if scenario_dict["cloud_config"]:
-            scenario_dict["cloud-config"] = yaml.load(scenario_dict["cloud_config"])
-        del scenario_dict["cloud_config"]
-        # sce_vnfs
-        cmd = "SELECT uuid,name,member_vnf_index,vnf_id,description FROM sce_vnfs WHERE scenario_id='{}' "\
-                "ORDER BY created_at".format(scenario_dict['uuid'])
-        self.logger.debug(cmd)
-        self.cur.execute(cmd)
-        scenario_dict['vnfs'] = self.cur.fetchall()
-
-        for vnf in scenario_dict['vnfs']:
-            cmd = "SELECT mgmt_access FROM vnfs WHERE uuid='{}'".format(scenario_dict['vnfs'][0]['vnf_id'])
-            self.logger.debug(cmd)
-            self.cur.execute(cmd)
-            mgmt_access_dict = self.cur.fetchall()
-            if mgmt_access_dict[0].get('mgmt_access'):
-                vnf['mgmt_access'] = yaml.load(mgmt_access_dict[0]['mgmt_access'])
-            else:
-                vnf['mgmt_access'] = None
-            # sce_interfaces
-            cmd = "SELECT scei.uuid,scei.sce_net_id,scei.interface_id,i.external_name,scei.ip_address"\
-                    " FROM sce_interfaces as scei join interfaces as i on scei.interface_id=i.uuid"\
-                    " WHERE scei.sce_vnf_id='{}' ORDER BY scei.created_at".format(vnf['uuid'])
-            self.logger.debug(cmd)
-            self.cur.execute(cmd)
-            vnf['interfaces'] = self.cur.fetchall()
-            # vms
-            cmd = "SELECT vms.uuid as uuid, flavor_id, image_id, image_list, vms.name as name," \
-                    " vms.description as description, vms.boot_data as boot_data, count," \
-                    " vms.availability_zone as availability_zone, vms.osm_id as osm_id, vms.pdu_type" \
-                    " FROM vnfs join vms on vnfs.uuid=vms.vnf_id" \
-                    " WHERE vnfs.uuid='" + vnf['vnf_id'] + "'"  \
-                    " ORDER BY vms.created_at"
-            self.logger.debug(cmd)
-            self.cur.execute(cmd)
-            vnf['vms'] = self.cur.fetchall()
-            for vm in vnf['vms']:
-                if vm["boot_data"]:
-                    vm["boot_data"] = yaml.safe_load(vm["boot_data"])
-                else:
-                    del vm["boot_data"]
-                if vm["image_list"]:
-                    vm["image_list"] = yaml.safe_load(vm["image_list"])
-                else:
-                    del vm["image_list"]
-                if datacenter_vim_id!=None:
-                    if vm['image_id']:
-                        cmd = "SELECT vim_id FROM datacenters_images WHERE image_id='{}' AND " \
-                                "datacenter_vim_id='{}'".format(vm['image_id'], datacenter_vim_id)
-                        self.logger.debug(cmd)
-                        self.cur.execute(cmd)
-                        if self.cur.rowcount==1:
-                            vim_image_dict = self.cur.fetchone()
-                            vm['vim_image_id']=vim_image_dict['vim_id']
-                    if vm['flavor_id']:
-                        cmd = "SELECT vim_id FROM datacenters_flavors WHERE flavor_id='{}' AND " \
-                                "datacenter_vim_id='{}'".format(vm['flavor_id'], datacenter_vim_id)
-                        self.logger.debug(cmd)
-                        self.cur.execute(cmd)
-                        if self.cur.rowcount==1:
-                            vim_flavor_dict = self.cur.fetchone()
-                            vm['vim_flavor_id']=vim_flavor_dict['vim_id']
-
-                #interfaces
-                cmd = "SELECT uuid,internal_name,external_name,net_id,type,vpci,mac,bw,model,ip_address," \
-                        "floating_ip, port_security" \
-                        " FROM interfaces" \
-                        " WHERE vm_id='{}'" \
-                        " ORDER BY created_at".format(vm['uuid'])
-                self.logger.debug(cmd)
-                self.cur.execute(cmd)
-                vm['interfaces'] = self.cur.fetchall()
-                for iface in vm['interfaces']:
-                    iface['port-security'] = iface.pop("port_security")
-                    iface['floating-ip'] = iface.pop("floating_ip")
-                    for sce_interface in vnf["interfaces"]:
-                        if sce_interface["interface_id"] == iface["uuid"]:
-                            if sce_interface["ip_address"]:
-                                iface["ip_address"] = sce_interface["ip_address"]
-                            break
-            #nets    every net of a vms
-            cmd = "SELECT uuid,name,type,description, osm_id FROM nets WHERE vnf_id='{}'".format(vnf['vnf_id'])
-            self.logger.debug(cmd)
-            self.cur.execute(cmd)
-            vnf['nets'] = self.cur.fetchall()
-            for vnf_net in vnf['nets']:
-                SELECT_ = "ip_version,subnet_address,gateway_address,dns_address,dhcp_enabled,dhcp_start_address,dhcp_count"
-                cmd = "SELECT {} FROM ip_profiles WHERE net_id='{}'".format(SELECT_,vnf_net['uuid'])
-                self.logger.debug(cmd)
-                self.cur.execute(cmd)
-                ipprofiles = self.cur.fetchall()
-                if self.cur.rowcount==1:
-                    vnf_net["ip_profile"] = ipprofiles[0]
-                elif self.cur.rowcount>1:
-                    raise db_base.db_base_Exception("More than one ip-profile found with this criteria: net_id='{}'".format(vnf_net['uuid']), httperrors.Bad_Request)
-
-        #sce_nets
-        cmd = "SELECT uuid,name,type,external,description,vim_network_name, osm_id" \
-                " FROM sce_nets  WHERE scenario_id='{}'" \
-                " ORDER BY created_at ".format(scenario_dict['uuid'])
-        self.logger.debug(cmd)
-        self.cur.execute(cmd)
-        scenario_dict['nets'] = self.cur.fetchall()
-        #datacenter_nets
-        for net in scenario_dict['nets']:
-            if str(net['external']) == 'false':
-                SELECT_ = "ip_version,subnet_address,gateway_address,dns_address,dhcp_enabled,dhcp_start_address,dhcp_count"
-                cmd = "SELECT {} FROM ip_profiles WHERE sce_net_id='{}'".format(SELECT_,net['uuid'])
-                self.logger.debug(cmd)
-                self.cur.execute(cmd)
-                ipprofiles = self.cur.fetchall()
-                if self.cur.rowcount==1:
-                    net["ip_profile"] = ipprofiles[0]
-                elif self.cur.rowcount>1:
-                    raise db_base.db_base_Exception("More than one ip-profile found with this criteria: sce_net_id='{}'".format(net['uuid']), httperrors.Bad_Request)
-                continue
-            WHERE_=" WHERE name='{}'".format(net['name'])
-            if datacenter_id!=None:
-                WHERE_ += " AND datacenter_id='{}'".format(datacenter_id)
-            cmd = "SELECT vim_net_id FROM datacenter_nets" + WHERE_
-            self.logger.debug(cmd)
-            self.cur.execute(cmd)
-            d_net = self.cur.fetchone()
-            if d_net==None or datacenter_vim_id==None:
-                #print "nfvo_db.get_scenario() WARNING external net %s not found"  % net['name']
-                net['vim_id']=None
-            else:
-                net['vim_id']=d_net['vim_net_id']
-
-        db_base._convert_datetime2str(scenario_dict)
-        db_base._convert_str2boolean(scenario_dict, ('public','shared','external','port-security','floating-ip') )
-
-        #forwarding graphs
-        cmd = "SELECT uuid,name,description,vendor FROM sce_vnffgs WHERE scenario_id='{}' "\
-                "ORDER BY created_at".format(scenario_dict['uuid'])
-        self.logger.debug(cmd)
-        self.cur.execute(cmd)
-        scenario_dict['vnffgs'] = self.cur.fetchall()
-        for vnffg in scenario_dict['vnffgs']:
-            cmd = "SELECT uuid,name FROM sce_rsps WHERE sce_vnffg_id='{}' "\
-                    "ORDER BY created_at".format(vnffg['uuid'])
-            self.logger.debug(cmd)
-            self.cur.execute(cmd)
-            vnffg['rsps'] = self.cur.fetchall()
-            for rsp in vnffg['rsps']:
-                cmd = "SELECT uuid,if_order,ingress_interface_id,egress_interface_id,sce_vnf_id " \
-                        "FROM sce_rsp_hops WHERE sce_rsp_id='{}' "\
-                        "ORDER BY created_at".format(rsp['uuid'])
-                self.logger.debug(cmd)
-                self.cur.execute(cmd)
-                rsp['connection_points'] = self.cur.fetchall();
-                cmd = "SELECT uuid,name,sce_vnf_id,interface_id FROM sce_classifiers WHERE sce_vnffg_id='{}' "\
-                        "AND sce_rsp_id='{}' ORDER BY created_at".format(vnffg['uuid'], rsp['uuid'])
-                self.logger.debug(cmd)
-                self.cur.execute(cmd)
-                rsp['classifier'] = self.cur.fetchone();
-                cmd = "SELECT uuid,ip_proto,source_ip,destination_ip,source_port,destination_port FROM sce_classifier_matches "\
-                        "WHERE sce_classifier_id='{}' ORDER BY created_at".format(rsp['classifier']['uuid'])
-                self.logger.debug(cmd)
-                self.cur.execute(cmd)
-                rsp['classifier']['matches'] = self.cur.fetchall()
-
-        return scenario_dict
-
-    @retry(command="delete", extra="instances running")
-    @with_transaction(cursor='dict')
-    def delete_scenario(self, scenario_id, tenant_id=None):
-        '''Deletes a scenario, filtering by one or several of the tenant, uuid or name
-        scenario_id is the uuid or the name if it is not a valid uuid format
-        Only one scenario must mutch the filtering or an error is returned
-        '''
-        #scenario table
-        where_text = "uuid='{}'".format(scenario_id)
-        if not tenant_id and tenant_id != "any":
-            where_text += " AND (tenant_id='{}' OR public='True')".format(tenant_id)
-        cmd = "SELECT * FROM scenarios WHERE "+ where_text
-        self.logger.debug(cmd)
-        self.cur.execute(cmd)
-        rows = self.cur.fetchall()
-        if self.cur.rowcount==0:
-            raise db_base.db_base_Exception("No scenario found where " + where_text, httperrors.Not_Found)
-        elif self.cur.rowcount>1:
-            raise db_base.db_base_Exception("More than one scenario found where " + where_text, httperrors.Conflict)
-        scenario_uuid = rows[0]["uuid"]
-        scenario_name = rows[0]["name"]
-
-        #sce_vnfs
-        cmd = "DELETE FROM scenarios WHERE uuid='{}'".format(scenario_uuid)
-        self.logger.debug(cmd)
-        self.cur.execute(cmd)
-
-        return scenario_uuid + " " + scenario_name
-
-    @retry
-    @with_transaction
-    def new_rows(self, tables, uuid_list=None, confidential_data=False, attempt=_ATTEMPT):
-        """
-        Make a transactional insertion of rows at several tables. Can be also a deletion
-        :param tables: list with dictionary where the keys are the table names and the values are a row or row list
-            with the values to be inserted at the table. Each row is a dictionary with the key values. E.g.:
-            tables = [
-                {"table1": [ {"column1": value, "column2: value, ... }, {"column1": value, "column2: value, ... }, ...],
-                {"table2": [ {"column1": value, "column2: value, ... }, {"column1": value, "column2: value, ... }, ...],
-                {"table3": {"column1": value, "column2: value, ... }
-            }
-            If tables does not contain the 'created_at', it is generated incrementally with the order of tables. You can
-            provide a integer value, that it is an index multiply by 0.00001 to add to the created time to manually set
-            up and order
-            If dict contains {"TO-DELETE": uuid} the entry is deleted if exist instead of inserted
-        :param uuid_list: list of created uuids, first one is the root (#TODO to store at uuid table)
-        :return: None if success,  raise exception otherwise
-        """
-        table_name = None
-        created_time = time.time()
-        for table in tables:
-            for table_name, row_list in table.items():
-                index = 0
-                attempt.info['table'] = table_name
-                if isinstance(row_list, dict):
-                    row_list = (row_list, )  #create a list with the single value
-                for row in row_list:
-                    if "TO-DELETE" in row:
-                        self._delete_row_by_id_internal(table_name, row["TO-DELETE"])
-                        continue
-                    if table_name in self.tables_with_created_field:
-                        if "created_at" in row:
-                            created_time_param = created_time + (index + row.pop("created_at"))*0.00001
-                        else:
-                            created_time_param = created_time + index*0.00001
-                        index += 1
-                    else:
-                        created_time_param = 0
-                    self._new_row_internal(table_name, row, add_uuid=False, root_uuid=None,
-                                            confidential_data=confidential_data,
-                                            created_time=created_time_param)
-
-    @retry
-    @with_transaction
-    def new_instance_scenario_as_a_whole(self,tenant_id,instance_scenario_name,instance_scenario_description,scenarioDict):
-        created_time = time.time()
-        #instance_scenarios
-        datacenter_id = scenarioDict['datacenter_id']
-        INSERT_={'tenant_id': tenant_id,
-            'datacenter_tenant_id': scenarioDict["datacenter2tenant"][datacenter_id],
-            'name': instance_scenario_name,
-            'description': instance_scenario_description,
-            'scenario_id' : scenarioDict['uuid'],
-            'datacenter_id': datacenter_id
-        }
-        if scenarioDict.get("cloud-config"):
-            INSERT_["cloud_config"] = yaml.safe_dump(scenarioDict["cloud-config"], default_flow_style=True, width=256)
-
-        instance_uuid = self._new_row_internal('instance_scenarios', INSERT_, add_uuid=True, root_uuid=None, created_time=created_time)
-
-        net_scene2instance={}
-        #instance_nets   #nets interVNF
-        for net in scenarioDict['nets']:
-            net_scene2instance[ net['uuid'] ] ={}
-            datacenter_site_id = net.get('datacenter_id', datacenter_id)
-            if not "vim_id_sites" in net:
-                net["vim_id_sites"] ={datacenter_site_id: net['vim_id']}
-                net["vim_id_sites"]["datacenter_site_id"] = {datacenter_site_id: net['vim_id']}
-            sce_net_id = net.get("uuid")
-
-            for datacenter_site_id,vim_id in net["vim_id_sites"].iteritems():
-                INSERT_={'vim_net_id': vim_id, 'created': net.get('created', False), 'instance_scenario_id':instance_uuid } #,  'type': net['type']
-                INSERT_['datacenter_id'] = datacenter_site_id
-                INSERT_['datacenter_tenant_id'] = scenarioDict["datacenter2tenant"][datacenter_site_id]
-                if not net.get('created', False):
-                    INSERT_['status'] = "ACTIVE"
-                if sce_net_id:
-                    INSERT_['sce_net_id'] = sce_net_id
-                created_time += 0.00001
-                instance_net_uuid =  self._new_row_internal('instance_nets', INSERT_, True, instance_uuid, created_time)
-                net_scene2instance[ sce_net_id ][datacenter_site_id] = instance_net_uuid
-                net['uuid'] = instance_net_uuid  #overwrite scnario uuid by instance uuid
-
-            if 'ip_profile' in net:
-                net['ip_profile']['net_id'] = None
-                net['ip_profile']['sce_net_id'] = None
-                net['ip_profile']['instance_net_id'] = instance_net_uuid
-                created_time += 0.00001
-                ip_profile_id = self._new_row_internal('ip_profiles', net['ip_profile'])
-
-        #instance_vnfs
-        for vnf in scenarioDict['vnfs']:
-            datacenter_site_id = vnf.get('datacenter_id', datacenter_id)
-            INSERT_={'instance_scenario_id': instance_uuid,  'vnf_id': vnf['vnf_id']  }
-            INSERT_['datacenter_id'] = datacenter_site_id
-            INSERT_['datacenter_tenant_id'] = scenarioDict["datacenter2tenant"][datacenter_site_id]
-            if vnf.get("uuid"):
-                INSERT_['sce_vnf_id'] = vnf['uuid']
-            created_time += 0.00001
-            instance_vnf_uuid =  self._new_row_internal('instance_vnfs', INSERT_, True, instance_uuid, created_time)
-            vnf['uuid'] = instance_vnf_uuid  #overwrite scnario uuid by instance uuid
-
-            #instance_nets   #nets intraVNF
-            for net in vnf['nets']:
-                net_scene2instance[ net['uuid'] ] = {}
-                INSERT_={'vim_net_id': net['vim_id'], 'created': net.get('created', False), 'instance_scenario_id':instance_uuid  } #,  'type': net['type']
-                INSERT_['datacenter_id'] = net.get('datacenter_id', datacenter_site_id)
-                INSERT_['datacenter_tenant_id'] = scenarioDict["datacenter2tenant"][datacenter_id]
-                if net.get("uuid"):
-                    INSERT_['net_id'] = net['uuid']
-                created_time += 0.00001
-                instance_net_uuid =  self._new_row_internal('instance_nets', INSERT_, True, instance_uuid, created_time)
-                net_scene2instance[ net['uuid'] ][datacenter_site_id] = instance_net_uuid
-                net['uuid'] = instance_net_uuid  #overwrite scnario uuid by instance uuid
-
-                if 'ip_profile' in net:
-                    net['ip_profile']['net_id'] = None
-                    net['ip_profile']['sce_net_id'] = None
-                    net['ip_profile']['instance_net_id'] = instance_net_uuid
-                    created_time += 0.00001
-                    ip_profile_id = self._new_row_internal('ip_profiles', net['ip_profile'])
-
-            #instance_vms
-            for vm in vnf['vms']:
-                INSERT_={'instance_vnf_id': instance_vnf_uuid,  'vm_id': vm['uuid'], 'vim_vm_id': vm['vim_id']  }
-                created_time += 0.00001
-                instance_vm_uuid =  self._new_row_internal('instance_vms', INSERT_, True, instance_uuid, created_time)
-                vm['uuid'] = instance_vm_uuid  #overwrite scnario uuid by instance uuid
-
-                #instance_interfaces
-                for interface in vm['interfaces']:
-                    net_id = interface.get('net_id', None)
-                    if net_id is None:
-                        #check if is connected to a inter VNFs net
-                        for iface in vnf['interfaces']:
-                            if iface['interface_id'] == interface['uuid']:
-                                if 'ip_address' in iface:
-                                    interface['ip_address'] = iface['ip_address']
-                                net_id = iface.get('sce_net_id', None)
-                                break
-                    if net_id is None:
-                        continue
-                    interface_type='external' if interface['external_name'] is not None else 'internal'
-                    INSERT_={'instance_vm_id': instance_vm_uuid,  'instance_net_id': net_scene2instance[net_id][datacenter_site_id],
-                        'interface_id': interface['uuid'], 'vim_interface_id': interface.get('vim_id'), 'type':  interface_type,
-                        'ip_address': interface.get('ip_address'), 'floating_ip': int(interface.get('floating-ip',False)),
-                        'port_security': int(interface.get('port-security',True))}
-                    #created_time += 0.00001
-                    interface_uuid =  self._new_row_internal('instance_interfaces', INSERT_, True, instance_uuid) #, created_time)
-                    interface['uuid'] = interface_uuid  #overwrite scnario uuid by instance uuid
-        return instance_uuid
-
-    @retry
-    @with_transaction(cursor='dict')
-    def get_instance_scenario(self, instance_id, tenant_id=None, verbose=False):
-        '''Obtain the instance information, filtering by one or several of the tenant, uuid or name
-        instance_id is the uuid or the name if it is not a valid uuid format
-        Only one instance must mutch the filtering or an error is returned
-        '''
-        # instance table
-        where_list = []
-        if tenant_id:
-            where_list.append("inst.tenant_id='{}'".format(tenant_id))
-        if db_base._check_valid_uuid(instance_id):
-            where_list.append("inst.uuid='{}'".format(instance_id))
-        else:
-            where_list.append("inst.name='{}'".format(instance_id))
-        where_text = " AND ".join(where_list)
-        cmd = "SELECT inst.uuid as uuid, inst.name as name, inst.scenario_id as scenario_id, datacenter_id"\
-                    " ,datacenter_tenant_id, s.name as scenario_name,inst.tenant_id as tenant_id" \
-                    " ,inst.description as description, inst.created_at as created_at" \
-                    " ,inst.cloud_config as cloud_config, s.osm_id as nsd_osm_id" \
-                " FROM instance_scenarios as inst left join scenarios as s on inst.scenario_id=s.uuid" \
-                " WHERE " + where_text
-        self.logger.debug(cmd)
-        self.cur.execute(cmd)
-        rows = self.cur.fetchall()
-
-        if self.cur.rowcount == 0:
-            raise db_base.db_base_Exception("No instance found where " + where_text, httperrors.Not_Found)
-        elif self.cur.rowcount > 1:
-            raise db_base.db_base_Exception("More than one instance found where " + where_text,
-                                            httperrors.Bad_Request)
-        instance_dict = rows[0]
-        if instance_dict["cloud_config"]:
-            instance_dict["cloud-config"] = yaml.load(instance_dict["cloud_config"])
-        del instance_dict["cloud_config"]
-
-        # instance_vnfs
-        cmd = "SELECT iv.uuid as uuid, iv.vnf_id as vnf_id, sv.name as vnf_name, sce_vnf_id, datacenter_id"\
-                ", datacenter_tenant_id, v.mgmt_access, sv.member_vnf_index, v.osm_id as vnfd_osm_id "\
-                "FROM instance_vnfs as iv left join sce_vnfs as sv "\
-                " on iv.sce_vnf_id=sv.uuid join vnfs as v on iv.vnf_id=v.uuid " \
-                "WHERE iv.instance_scenario_id='{}' " \
-                "ORDER BY iv.created_at ".format(instance_dict['uuid'])
-        self.logger.debug(cmd)
-        self.cur.execute(cmd)
-        instance_dict['vnfs'] = self.cur.fetchall()
-        for vnf in instance_dict['vnfs']:
-            vnf["ip_address"] = None
-            vnf_mgmt_access_iface = None
-            vnf_mgmt_access_vm = None
-            if vnf["mgmt_access"]:
-                vnf_mgmt_access = yaml.load(vnf["mgmt_access"])
-                vnf_mgmt_access_iface = vnf_mgmt_access.get("interface_id")
-                vnf_mgmt_access_vm = vnf_mgmt_access.get("vm_id")
-                vnf["ip_address"] = vnf_mgmt_access.get("ip-address")
-
-            # instance vms
-            cmd = "SELECT iv.uuid as uuid, vim_vm_id, status, error_msg, vim_info, iv.created_at as "\
-                    "created_at, name, vms.osm_id as vdu_osm_id, vim_name, vms.uuid as vm_uuid, related"\
-                    " FROM instance_vms as iv join vms on iv.vm_id=vms.uuid "\
-                    " WHERE instance_vnf_id='{}' ORDER BY iv.created_at".format(vnf['uuid'])
-            self.logger.debug(cmd)
-            self.cur.execute(cmd)
-            vnf['vms'] = self.cur.fetchall()
-            for vm in vnf['vms']:
-                vm_manage_iface_list=[]
-                # instance_interfaces
-                cmd = "SELECT vim_interface_id, instance_net_id, internal_name,external_name, mac_address,"\
-                        " ii.ip_address as ip_address, vim_info, i.type as type, sdn_port_id, i.uuid"\
-                        " FROM instance_interfaces as ii join interfaces as i on ii.interface_id=i.uuid"\
-                        " WHERE instance_vm_id='{}' ORDER BY created_at".format(vm['uuid'])
-                self.logger.debug(cmd)
-                self.cur.execute(cmd )
-                vm['interfaces'] = self.cur.fetchall()
-                for iface in vm['interfaces']:
-                    if vnf_mgmt_access_iface and vnf_mgmt_access_iface == iface["uuid"]:
-                        if not vnf["ip_address"]:
-                            vnf["ip_address"] = iface["ip_address"]
-                    if iface["type"] == "mgmt" and iface["ip_address"]:
-                        vm_manage_iface_list.append(iface["ip_address"])
-                    if not verbose:
-                        del iface["type"]
-                    del iface["uuid"]
-                if vm_manage_iface_list:
-                    vm["ip_address"] = ",".join(vm_manage_iface_list)
-                    if not vnf["ip_address"] and vnf_mgmt_access_vm == vm["vm_uuid"]:
-                        vnf["ip_address"] = vm["ip_address"]
-                del vm["vm_uuid"]
-
-        #instance_nets
-        #select_text = "instance_nets.uuid as uuid,sce_nets.name as net_name,instance_nets.vim_net_id as net_id,instance_nets.status as status,instance_nets.external as external"
-        #from_text = "instance_nets join instance_scenarios on instance_nets.instance_scenario_id=instance_scenarios.uuid " + \
-        #            "join sce_nets on instance_scenarios.scenario_id=sce_nets.scenario_id"
-        #where_text = "instance_nets.instance_scenario_id='"+ instance_dict['uuid'] + "'"
-        cmd = "SELECT inets.uuid as uuid,vim_net_id,status,error_msg,vim_info,created, sce_net_id, " \
-                "net_id as vnf_net_id, datacenter_id, datacenter_tenant_id, sdn_net_id, " \
-                "snets.osm_id as ns_net_osm_id, nets.osm_id as vnf_net_osm_id, inets.vim_name, related " \
-                "FROM instance_nets as inets left join sce_nets as snets on inets.sce_net_id=snets.uuid " \
-                "left join nets on inets.net_id=nets.uuid " \
-                "WHERE instance_scenario_id='{}' ORDER BY inets.created_at".format(instance_dict['uuid'])
-        self.logger.debug(cmd)
-        self.cur.execute(cmd)
-        instance_dict['nets'] = self.cur.fetchall()
-
-        #instance_sfps
-        cmd = "SELECT uuid,vim_sfp_id,sce_rsp_id,datacenter_id,"\
-                "datacenter_tenant_id,status,error_msg,vim_info, related"\
-                " FROM instance_sfps" \
-                " WHERE instance_scenario_id='{}' ORDER BY created_at".format(instance_dict['uuid'])
-        self.logger.debug(cmd)
-        self.cur.execute(cmd)
-        instance_dict['sfps'] = self.cur.fetchall()
-
-        # for sfp in instance_dict['sfps']:
-        #instance_sfs
-        cmd = "SELECT uuid,vim_sf_id,sce_rsp_hop_id,datacenter_id,"\
-                "datacenter_tenant_id,status,error_msg,vim_info, related"\
-                " FROM instance_sfs" \
-                " WHERE instance_scenario_id='{}' ORDER BY created_at".format(instance_dict['uuid']) # TODO: replace instance_scenario_id with instance_sfp_id
-        self.logger.debug(cmd)
-        self.cur.execute(cmd)
-        instance_dict['sfs'] = self.cur.fetchall()
-
-        #for sf in instance_dict['sfs']:
-        #instance_sfis
-        cmd = "SELECT uuid,vim_sfi_id,sce_rsp_hop_id,datacenter_id,"\
-                "datacenter_tenant_id,status,error_msg,vim_info, related"\
-                " FROM instance_sfis" \
-                " WHERE instance_scenario_id='{}' ORDER BY created_at".format(instance_dict['uuid']) # TODO: replace instance_scenario_id with instance_sf_id
-        self.logger.debug(cmd)
-        self.cur.execute(cmd)
-        instance_dict['sfis'] = self.cur.fetchall()
-#                            for sfi in instance_dict['sfi']:
-
-        #instance_classifications
-        cmd = "SELECT uuid,vim_classification_id,sce_classifier_match_id,datacenter_id,"\
-                "datacenter_tenant_id,status,error_msg,vim_info, related"\
-                " FROM instance_classifications" \
-                " WHERE instance_scenario_id='{}' ORDER BY created_at".format(instance_dict['uuid'])
-        self.logger.debug(cmd)
-        self.cur.execute(cmd)
-        instance_dict['classifications'] = self.cur.fetchall()
-#                    for classification in instance_dict['classifications']
-
-        db_base._convert_datetime2str(instance_dict)
-        db_base._convert_str2boolean(instance_dict, ('public','shared','created') )
-        return instance_dict
-
-    @retry(command='delete', extra='No dependences can avoid deleting!!!!')
-    @with_transaction(cursor='dict')
-    def delete_instance_scenario(self, instance_id, tenant_id=None):
-        '''Deletes a instance_Scenario, filtering by one or several of the tenant, uuid or name
-        instance_id is the uuid or the name if it is not a valid uuid format
-        Only one instance_scenario must mutch the filtering or an error is returned
-        '''
-        #instance table
-        where_list=[]
-        if tenant_id is not None: where_list.append( "tenant_id='" + tenant_id +"'" )
-        if db_base._check_valid_uuid(instance_id):
-            where_list.append( "uuid='" + instance_id +"'" )
-        else:
-            where_list.append( "name='" + instance_id +"'" )
-        where_text = " AND ".join(where_list)
-        cmd = "SELECT * FROM instance_scenarios WHERE "+ where_text
-        self.logger.debug(cmd)
-        self.cur.execute(cmd)
-        rows = self.cur.fetchall()
-
-        if self.cur.rowcount==0:
-            raise db_base.db_base_Exception("No instance found where " + where_text, httperrors.Bad_Request)
-        elif self.cur.rowcount>1:
-            raise db_base.db_base_Exception("More than one instance found where " + where_text, httperrors.Bad_Request)
-        instance_uuid = rows[0]["uuid"]
-        instance_name = rows[0]["name"]
-
-        #sce_vnfs
-        cmd = "DELETE FROM instance_scenarios WHERE uuid='{}'".format(instance_uuid)
-        self.logger.debug(cmd)
-        self.cur.execute(cmd)
-
-        return instance_uuid + " " + instance_name
-
-    @retry(table='instance_scenarios')
-    @with_transaction
-    def new_instance_scenario(self, instance_scenario_dict, tenant_id):
-        #return self.new_row('vnfs', vnf_dict, None, tenant_id, True, True)
-        return self._new_row_internal('instance_scenarios', instance_scenario_dict, tenant_id, add_uuid=True, root_uuid=None, log=True)
-
-    def update_instance_scenario(self, instance_scenario_dict):
-        #TODO:
-        return
-
-    @retry(table='instance_vnfs')
-    @with_transaction
-    def new_instance_vnf(self, instance_vnf_dict, tenant_id, instance_scenario_id = None):
-        #return self.new_row('vms', vm_dict, tenant_id, True, True)
-        return self._new_row_internal('instance_vnfs', instance_vnf_dict, tenant_id, add_uuid=True, root_uuid=instance_scenario_id, log=True)
-
-    def update_instance_vnf(self, instance_vnf_dict):
-        #TODO:
-        return
-
-    def delete_instance_vnf(self, instance_vnf_id):
-        #TODO:
-        return
-
-    @retry(table='instance_vms')
-    @with_transaction
-    def new_instance_vm(self, instance_vm_dict, tenant_id, instance_scenario_id = None):
-        #return self.new_row('vms', vm_dict, tenant_id, True, True)
-        return self._new_row_internal('instance_vms', instance_vm_dict, tenant_id, add_uuid=True, root_uuid=instance_scenario_id, log=True)
-
-    def update_instance_vm(self, instance_vm_dict):
-        #TODO:
-        return
-
-    def delete_instance_vm(self, instance_vm_id):
-        #TODO:
-        return
-
-    @retry(table='instance_nets')
-    @with_transaction
-    def new_instance_net(self, instance_net_dict, tenant_id, instance_scenario_id = None):
-        return self._new_row_internal('instance_nets', instance_net_dict, tenant_id, add_uuid=True, root_uuid=instance_scenario_id, log=True)
-
-    def update_instance_net(self, instance_net_dict):
-        #TODO:
-        return
-
-    def delete_instance_net(self, instance_net_id):
-        #TODO:
-        return
-
-    @retry(table='instance_interfaces')
-    @with_transaction
-    def new_instance_interface(self, instance_interface_dict, tenant_id, instance_scenario_id = None):
-        return self._new_row_internal('instance_interfaces', instance_interface_dict, tenant_id, add_uuid=True, root_uuid=instance_scenario_id, log=True)
-
-    def update_instance_interface(self, instance_interface_dict):
-        #TODO:
-        return
-
-    def delete_instance_interface(self, instance_interface_dict):
-        #TODO:
-        return
-
-    @retry(table='datacenter_nets')
-    @with_transaction
-    def update_datacenter_nets(self, datacenter_id, new_net_list=[]):
-        ''' Removes the old and adds the new net list at datacenter list for one datacenter.
-        Attribute
-            datacenter_id: uuid of the datacenter to act upon
-            table: table where to insert
-            new_net_list: the new values to be inserted. If empty it only deletes the existing nets
-        Return: (Inserted items, Deleted items) if OK, (-Error, text) if error
-        '''
-        created_time = time.time()
-        cmd="DELETE FROM datacenter_nets WHERE datacenter_id='{}'".format(datacenter_id)
-        self.logger.debug(cmd)
-        self.cur.execute(cmd)
-        deleted = self.cur.rowcount
-        inserted = 0
-        for new_net in new_net_list:
-            created_time += 0.00001
-            self._new_row_internal('datacenter_nets', new_net, add_uuid=True, created_time=created_time)
-            inserted += 1
-        return inserted, deleted
diff --git a/osm_ro/openmano_schemas.py b/osm_ro/openmano_schemas.py
deleted file mode 100644 (file)
index 8fd2889..0000000
+++ /dev/null
@@ -1,1268 +0,0 @@
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-'''
-JSON schemas used by openmano httpserver.py module to parse the different files and messages sent through the API
-'''
-__author__="Alfonso Tierno, Gerardo Garcia, Pablo Montes"
-__date__ ="$09-oct-2014 09:09:48$"
-
-#Basis schemas
-patern_name="^[ -~]+$"
-passwd_schema={"type" : "string", "minLength":1, "maxLength":60}
-nameshort_schema={"type" : "string", "minLength":1, "maxLength":60, "pattern" : "^[^,;()'\"]+$"}
-name_schema={"type" : "string", "minLength":1, "maxLength":255, "pattern" : "^[^,;()'\"]+$"}
-xml_text_schema={"type" : "string", "minLength":1, "maxLength":1000, "pattern" : "^[^']+$"}
-description_schema={"type" : ["string","null"], "maxLength":255, "pattern" : "^[^'\"]+$"}
-id_schema_fake = {"type" : "string", "minLength":2, "maxLength":36 }  #"pattern": "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$"
-id_schema = {"type" : "string", "pattern": "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$"}
-pci_schema={"type":"string", "pattern":"^[0-9a-fA-F]{4}(:[0-9a-fA-F]{2}){2}\.[0-9a-fA-F]$"}
-# allows [] for wildcards. For that reason huge length limit is set
-pci_extended_schema = {"type": "string", "pattern": "^[0-9a-fA-F.:-\[\]]{12,40}$"}
-
-http_schema={"type":"string", "pattern":"^https?://[^'\"=]+$"}
-bandwidth_schema={"type":"string", "pattern" : "^[0-9]+ *([MG]bps)?$"}
-memory_schema={"type":"string", "pattern" : "^[0-9]+ *([MG]i?[Bb])?$"}
-integer0_schema={"type":"integer","minimum":0}
-integer1_schema={"type":"integer","minimum":1}
-path_schema={"type":"string", "pattern":"^(\.){0,2}(/[^/\"':{}\(\)]+)+$"}
-vlan_schema={"type":"integer","minimum":1,"maximum":4095}
-vlan1000_schema={"type":"integer","minimum":1000,"maximum":4095}
-mac_schema={"type":"string", "pattern":"^[0-9a-fA-F][02468aceACE](:[0-9a-fA-F]{2}){5}$"}  #must be unicast LSB bit of MSB byte ==0
-#mac_schema={"type":"string", "pattern":"^([0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2}$"}
-ip_schema={"type":"string","pattern":"^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$"}
-ip_prefix_schema={"type":"string","pattern":"^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)/(30|[12]?[0-9])$"}
-port_schema={"type":"integer","minimum":1,"maximum":65534}
-object_schema={"type":"object"}
-schema_version_2={"type":"integer","minimum":2,"maximum":2}
-#schema_version_string={"type":"string","enum": ["0.1", "2", "0.2", "3", "0.3"]}
-log_level_schema={"type":"string", "enum":["DEBUG", "INFO", "WARNING","ERROR","CRITICAL"]}
-checksum_schema={"type":"string", "pattern":"^[0-9a-fA-F]{32}$"}
-size_schema={"type":"integer","minimum":1,"maximum":100}
-boolean_schema = {"type": "boolean"}
-null_schema = {"type": "null"}
-
-metadata_schema={
-    "type":"object",
-    "properties":{
-        "architecture": {"type":"string"},
-        "use_incremental": {"type":"string","enum":["yes","no"]},
-        "vpci": pci_schema,
-        "os_distro": {"type":"string"},
-        "os_type": {"type":"string"},
-        "os_version": {"type":"string"},
-        "bus": {"type":"string"},
-        "topology": {"type":"string", "enum": ["oneSocket"]}
-    }
-}
-
-#Schema for the configuration file
-config_schema = {
-    "title":"configuration response information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "http_port": port_schema,
-        "http_admin_port": port_schema,
-        "http_host": nameshort_schema,
-        "auto_push_VNF_to_VIMs": boolean_schema,
-        "vnf_repository": path_schema,
-        "db_host": nameshort_schema,
-        "db_user": nameshort_schema,
-        "db_passwd": {"type":"string"},
-        "db_name": nameshort_schema,
-        "db_ovim_host": nameshort_schema,
-        "db_ovim_user": nameshort_schema,
-        "db_ovim_passwd": {"type":"string"},
-        "db_ovim_name": nameshort_schema,
-        # Next fields will disappear once the MANO API includes appropriate primitives
-        "vim_url": http_schema,
-        "vim_url_admin": http_schema,
-        "vim_name": nameshort_schema,
-        "vim_tenant_name": nameshort_schema,
-        "mano_tenant_name": nameshort_schema,
-        "mano_tenant_id": id_schema,
-        "http_console_proxy": boolean_schema,
-        "http_console_host": nameshort_schema,
-        "http_console_ports": {
-            "type": "array",
-            "items": {"OneOf": [
-                port_schema,
-                {"type": "object", "properties": {"from": port_schema, "to": port_schema}, "required": ["from", "to"]}
-            ]}
-        },
-        "log_level": log_level_schema,
-        "log_socket_level": log_level_schema,
-        "log_level_db": log_level_schema,
-        "log_level_vim": log_level_schema,
-        "log_level_wim": log_level_schema,
-        "log_level_nfvo": log_level_schema,
-        "log_level_http": log_level_schema,
-        "log_level_console": log_level_schema,
-        "log_level_ovim": log_level_schema,
-        "log_file_db": path_schema,
-        "log_file_vim": path_schema,
-        "log_file_wim": path_schema,
-        "log_file_nfvo": path_schema,
-        "log_file_http": path_schema,
-        "log_file_console": path_schema,
-        "log_file_ovim": path_schema,
-        "log_socket_host": nameshort_schema,
-        "log_socket_port": port_schema,
-        "log_file": path_schema,
-    },
-    "required": ['db_user', 'db_passwd', 'db_name'],
-    "additionalProperties": False
-}
-
-tenant_schema = {
-    "title":"tenant information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "tenant":{
-            "type":"object",
-            "properties":{
-                "name": nameshort_schema,
-                "description": description_schema,
-            },
-            "required": ["name"],
-            "additionalProperties": True
-        }
-    },
-    "required": ["tenant"],
-    "additionalProperties": False
-}
-
-tenant_edit_schema = {
-    "title":"tenant edit information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "tenant":{
-            "type":"object",
-            "properties":{
-                "name": name_schema,
-                "description": description_schema,
-            },
-            "additionalProperties": False
-        }
-    },
-    "required": ["tenant"],
-    "additionalProperties": False
-}
-
-datacenter_schema_properties={
-    "name": name_schema,
-    "description": description_schema,
-    "type": nameshort_schema, #currently "openvim" or "openstack", can be enlarged with plugins
-    "vim_url": description_schema,
-    "vim_url_admin": description_schema,
-    "config": { "type":"object" }
-}
-
-datacenter_schema = {
-    "title":"datacenter information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "datacenter":{
-            "type":"object",
-            "properties":datacenter_schema_properties,
-            "required": ["name", "vim_url"],
-            "additionalProperties": True
-        }
-    },
-    "required": ["datacenter"],
-    "additionalProperties": False
-}
-
-
-datacenter_edit_schema = {
-    "title":"datacenter edit nformation schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "datacenter":{
-            "type":"object",
-            "properties":datacenter_schema_properties,
-            "additionalProperties": False
-        }
-    },
-    "required": ["datacenter"],
-    "additionalProperties": False
-}
-
-
-netmap_new_schema = {
-    "title":"netmap new information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "netmap":{   #delete from datacenter
-            "type":"object",
-            "properties":{
-                "name": name_schema,  #name or uuid of net to change
-                "vim_id": id_schema,
-                "vim_name": name_schema
-            },
-            "minProperties": 1,
-            "additionalProperties": False
-        },
-    },
-    "required": ["netmap"],
-    "additionalProperties": False
-}
-
-netmap_edit_schema = {
-    "title":"netmap edit information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "netmap":{   #delete from datacenter
-            "type":"object",
-            "properties":{
-                "name": name_schema,  #name or uuid of net to change
-            },
-            "minProperties": 1,
-            "additionalProperties": False
-        },
-    },
-    "required": ["netmap"],
-    "additionalProperties": False
-}
-
-datacenter_action_schema = {
-    "title":"datacenter action information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "check-connectivity": {"type": "null"},
-        "net-update": {"type": "null"},
-        "net-edit": {
-            "type":"object",
-            "properties":{
-                "net": name_schema,  #name or uuid of net to change
-                "name": name_schema,
-                "description": description_schema,
-                "shared": boolean_schema
-            },
-            "minProperties": 1,
-            "additionalProperties": False
-        },
-        "net-delete":{
-            "type":"object",
-            "properties":{
-                "net": name_schema,  #name or uuid of net to change
-            },
-            "required": ["net"],
-            "additionalProperties": False
-        },
-    },
-    "minProperties": 1,
-    "maxProperties": 1,
-    "additionalProperties": False
-}
-
-
-datacenter_associate_schema={
-    "title":"datacenter associate information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "datacenter":{
-            "type": "object",
-            "properties": {
-                "name": name_schema,
-                "vim_id": id_schema,
-                "vim_tenant": name_schema,
-                "vim_tenant_name": name_schema,
-                "vim_username": nameshort_schema,
-                "vim_password": nameshort_schema,
-                "config": {"type": "object"}
-            },
-            # "required": ["vim_tenant"],
-            "additionalProperties": True
-        }
-    },
-    "required": ["datacenter"],
-    "additionalProperties": False
-}
-
-dhcp_schema = {
-    "title": "DHCP schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type": "object",
-    "properties":{
-        "enabled": boolean_schema,
-        "start-address": {"OneOf": [null_schema, ip_schema]},
-        "count": integer0_schema
-    },
-    # "required": ["start-address", "count"],
-}
-
-ip_profile_schema = {
-    "title": "IP profile schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type": "object",
-    "properties": {
-        "ip-version": {"type": "string", "enum": ["IPv4", "IPv6"]},
-        "subnet-address": ip_prefix_schema,
-        "gateway-address": ip_schema,
-        "dns-address": {"oneOf": [ip_schema,     # for backward compatibility
-                                  {"type": "array", "items": ip_schema}]},
-        "dhcp": dhcp_schema
-    },
-}
-
-key_pair_schema = {
-    "title": "Key-pair schema for cloud-init configuration schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "name": name_schema,
-        "key": {"type":"string"}
-    },
-    "required": ["key"],
-    "additionalProperties": False
-}
-
-cloud_config_user_schema = {
-    "title": "User schema for cloud-init configuration schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "name": nameshort_schema,
-        "user-info": {"type":"string"},
-        #"key-pairs": {"type" : "array", "items": key_pair_schema}
-        "key-pairs": {"type" : "array", "items": {"type":"string"}}
-    },
-    "required": ["name"],
-    "additionalProperties": False
-}
-
-cloud_config_schema = {
-    "title": "Cloud-init configuration schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        #"key-pairs": {"type" : "array", "items": key_pair_schema},
-        "key-pairs": {"type" : "array", "items": {"type":"string"}},
-        "users": {"type" : "array", "items": cloud_config_user_schema}
-    },
-    "additionalProperties": False
-}
-
-internal_connection_element_schema = {
-    "type":"object",
-    "properties":{
-        "VNFC": name_schema,
-        "local_iface_name": name_schema
-    }
-}
-
-internal_connection_element_schema_v02 = {
-    "type":"object",
-    "properties":{
-        "VNFC": name_schema,
-        "local_iface_name": name_schema,
-        "ip_address": ip_schema
-    }
-}
-
-internal_connection_schema = {
-    "type":"object",
-    "properties":{
-        "name": name_schema,
-        "description":description_schema,
-        "type":{"type":"string", "enum":["bridge","data","ptp"]},
-        "elements": {"type" : "array", "items": internal_connection_element_schema, "minItems":1}
-    },
-    "required": ["name", "type", "elements"],
-    "additionalProperties": False
-}
-
-internal_connection_schema_v02 = {
-    "type":"object",
-    "properties":{
-        "name": name_schema,
-        "description":description_schema,
-        "type": {"type": "string", "enum":["e-line", "e-lan"]},
-        "implementation": {"type": "string", "enum":["overlay", "underlay"]},
-        "ip-profile": ip_profile_schema,
-        "elements": {"type" : "array", "items": internal_connection_element_schema_v02, "minItems":1}
-    },
-    "required": ["name", "type", "implementation", "elements"],
-    "additionalProperties": False
-}
-
-external_connection_schema = {
-    "type":"object",
-    "properties":{
-        "name": name_schema,
-        "type":{"type":"string", "enum":["mgmt","bridge","data"]},
-        "VNFC": name_schema,
-        "local_iface_name": name_schema ,
-        "description":description_schema
-    },
-    "required": ["name", "type", "VNFC", "local_iface_name"],
-    "additionalProperties": False
-}
-
-#Not yet used
-external_connection_schema_v02 = {
-    "type":"object",
-    "properties":{
-        "name": name_schema,
-        "mgmt": boolean_schema,
-        "type": {"type": "string", "enum":["e-line", "e-lan"]},
-        "implementation": {"type": "string", "enum":["overlay", "underlay"]},
-        "VNFC": name_schema,
-        "local_iface_name": name_schema ,
-        "description":description_schema
-    },
-    "required": ["name", "type", "VNFC", "local_iface_name"],
-    "additionalProperties": False
-}
-
-interfaces_schema={
-    "type":"array",
-    "items":{
-        "type":"object",
-        "properties":{
-            "name":name_schema,
-            "dedicated":{"type":"string","enum":["yes","no","yes:sriov"]},
-            "bandwidth":bandwidth_schema,
-            "vpci":pci_schema,
-            "mac_address": mac_schema
-        },
-        "additionalProperties": False,
-        "required": ["name","dedicated", "bandwidth"]
-    }
-}
-
-bridge_interfaces_schema={
-    "type":"array",
-    "items":{
-        "type":"object",
-        "properties":{
-            "name": name_schema,
-            "bandwidth":bandwidth_schema,
-            "vpci":pci_schema,
-            "mac_address": mac_schema,
-            "model": {"type":"string", "enum":["virtio","e1000","ne2k_pci","pcnet","rtl8139", "paravirt"]},
-            "port-security": boolean_schema,
-            "floating-ip": boolean_schema,
-        },
-        "additionalProperties": False,
-        "required": ["name"]
-    }
-}
-
-devices_schema={
-    "type":"array",
-    "items":{
-        "type":"object",
-        "properties":{
-            "type":{"type":"string", "enum":["disk","cdrom","xml"] },
-            "image": path_schema,
-            "image name": name_schema,
-            "image checksum": checksum_schema,
-            "image metadata": metadata_schema,
-            "size": size_schema,
-            "vpci":pci_schema,
-            "xml":xml_text_schema,
-            "name": name_schema,
-        },
-        "additionalProperties": False,
-        "required": ["type"]
-    }
-}
-
-
-numa_schema = {
-    "type": "object",
-    "properties": {
-        "memory":integer1_schema,
-        "cores":integer1_schema,
-        "paired-threads":integer1_schema,
-        "threads":integer1_schema,
-        "cores-id":{"type":"array","items":integer0_schema},
-        "paired-threads-id":{"type":"array","items":{"type":"array","minItems":2,"maxItems":2,"items":integer0_schema}},
-        "threads-id":{"type":"array","items":integer0_schema},
-        "interfaces":interfaces_schema
-    },
-    "additionalProperties": False,
-    #"required": ["memory"]
-}
-
-config_files_schema = {
-    "title": "Config files for cloud init schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type": "object",
-    "properties": {
-        "dest": path_schema,
-        "encoding": {"type": "string", "enum": ["b64", "base64", "gz", "gz+b64", "gz+base64", "gzip+b64", "gzip+base64"]},  #by default text
-        "content": {"type": "string"},
-        "permissions": {"type": "string"}, # tiypically octal notation '0644'
-        "owner": {"type": "string"},  # format:   owner:group
-
-    },
-    "additionalProperties": False,
-    "required": ["dest", "content"],
-}
-
-boot_data_vdu_schema  = {
-    "title": "Boot data (Cloud-init) configuration schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type": "object",
-    "properties":{
-        "key-pairs": {"type" : "array", "items": {"type":"string"}},
-        "users": {"type" : "array", "items": cloud_config_user_schema},
-        "user-data": {"type" : "string"},  # scrip to run
-        "config-files": {"type": "array", "items": config_files_schema},
-        # NOTE: “user-data” are mutually exclusive with users and config-files because user/files are injected using user-data
-        "boot-data-drive": boolean_schema,
-    },
-    "additionalProperties": False,
-}
-
-vnfc_schema = {
-    "type":"object",
-    "properties":{
-        "name": name_schema,
-        "description": description_schema,
-        "count": integer1_schema,
-        "image name": name_schema,
-        "availability_zone": name_schema,
-        "VNFC image": {"oneOf": [path_schema, http_schema]},
-        "image checksum": checksum_schema,
-        "image metadata": metadata_schema,
-        #"cloud-config": cloud_config_schema, #common for all vnfs in the scenario
-        "processor": {
-            "type":"object",
-            "properties":{
-                "model":description_schema,
-                "features":{"type":"array","items":nameshort_schema}
-            },
-            "required": ["model"],
-            "additionalProperties": False
-        },
-        "hypervisor": {
-            "type":"object",
-            "properties":{
-                "type":nameshort_schema,
-                "version":description_schema
-            },
-        },
-        "ram":integer0_schema,
-        "vcpus":integer0_schema,
-        "disk": integer1_schema,
-        "numas": {
-            "type": "array",
-            "items": numa_schema
-        },
-        "bridge-ifaces": bridge_interfaces_schema,
-        "devices": devices_schema,
-        "boot-data" : boot_data_vdu_schema
-
-    },
-    "required": ["name"],
-    "oneOf": [
-        {"required": ["VNFC image"]},
-        {"required": ["image name"]}
-    ],
-    "additionalProperties": False
-}
-
-vnfd_schema_v01 = {
-    "title":"vnfd information schema v0.1",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "vnf":{
-            "type":"object",
-            "properties":{
-                "name": name_schema,
-                "description": description_schema,
-
-                "class": nameshort_schema,
-                "public": boolean_schema,
-                "physical": boolean_schema,
-                "default_user": name_schema,
-                "tenant_id": id_schema, #only valid for admin
-                "external-connections": {"type" : "array", "items": external_connection_schema, "minItems":1},
-                "internal-connections": {"type" : "array", "items": internal_connection_schema, "minItems":1},
-                "VNFC":{"type" : "array", "items": vnfc_schema, "minItems":1}
-            },
-            "required": ["name","external-connections"],
-            "additionalProperties": True
-        }
-    },
-    "required": ["vnf"],
-    "additionalProperties": False
-}
-
-#VNFD schema for OSM R1
-vnfd_schema_v02 = {
-    "title":"vnfd information schema v0.2",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "schema_version": {"type": "string", "enum": ["0.2"]},
-        "vnf":{
-            "type":"object",
-            "properties":{
-                "name": name_schema,
-                "description": description_schema,
-                "class": nameshort_schema,
-                "public": boolean_schema,
-                "physical": boolean_schema,
-                "tenant_id": id_schema, #only valid for admin
-                "external-connections": {"type" : "array", "items": external_connection_schema, "minItems":1},
-                "internal-connections": {"type" : "array", "items": internal_connection_schema_v02, "minItems":1},
-                # "cloud-config": cloud_config_schema, #common for all vnfcs
-                "VNFC":{"type" : "array", "items": vnfc_schema, "minItems":1}
-            },
-            "required": ["name"],
-            "additionalProperties": True
-        }
-    },
-    "required": ["vnf", "schema_version"],
-    "additionalProperties": False
-}
-
-#vnfd_schema = vnfd_schema_v01
-#{
-#    "title":"vnfd information schema v0.2",
-#    "$schema": "http://json-schema.org/draft-04/schema#",
-#    "oneOf": [vnfd_schema_v01, vnfd_schema_v02]
-#}
-
-graph_schema = {
-    "title":"graphical scenario descriptor information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "x":      integer0_schema,
-        "y":      integer0_schema,
-        "ifaces": {
-            "type":"object",
-            "properties":{
-                "left": {"type":"array"},
-                "right": {"type":"array"},
-                "bottom": {"type":"array"},
-            }
-        }
-    },
-    "required": ["x","y"]
-}
-
-nsd_schema_v01 = {
-    "title":"network scenario descriptor information schema v0.1",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "name":name_schema,
-        "description": description_schema,
-        "tenant_id": id_schema, #only valid for admin
-        "public": boolean_schema,
-        "topology":{
-            "type":"object",
-            "properties":{
-                "nodes": {
-                    "type":"object",
-                    "patternProperties":{
-                        ".": {
-                            "type": "object",
-                            "properties":{
-                                "type":{"type":"string", "enum":["VNF", "other_network", "network", "external_network"]},
-                                "vnf_id": id_schema,
-                                "graph": graph_schema,
-                            },
-                            "patternProperties":{
-                                "^(VNF )?model$": {"type": "string"}
-                            },
-                            "required": ["type"]
-                        }
-                    }
-                },
-                "connections": {
-                    "type":"object",
-                    "patternProperties":{
-                        ".": {
-                            "type": "object",
-                            "properties":{
-                                "nodes":{"oneOf":[{"type":"object", "minProperties":2}, {"type":"array", "minLength":1}]},
-                                "type": {"type": "string", "enum":["link", "external_network", "dataplane_net", "bridge_net"]},
-                                "graph": graph_schema
-                            },
-                            "required": ["nodes"]
-                        },
-                    }
-                }
-            },
-            "required": ["nodes"],
-            "additionalProperties": False
-        }
-    },
-    "required": ["name","topology"],
-    "additionalProperties": False
-}
-
-nsd_schema_v02 = {
-    "title":"network scenario descriptor information schema v0.2",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "schema_version": schema_version_2,
-        "scenario":{
-            "type":"object",
-            "properties":{
-                "name": name_schema,
-                "description": description_schema,
-                "tenant_id": id_schema, #only valid for admin
-                "public": boolean_schema,
-                "vnfs": {
-                    "type":"object",
-                    "patternProperties":{
-                        ".": {
-                            "type": "object",
-                            "properties":{
-                                "vnf_id": id_schema,
-                                "graph": graph_schema,
-                                "vnf_name": name_schema,
-                            },
-                        }
-                    },
-                    "minProperties": 1
-                },
-                "networks": {
-                    "type":"object",
-                    "patternProperties":{
-                        ".": {
-                            "type": "object",
-                            "properties":{
-                                "interfaces":{"type":"array", "minLength":1},
-                                "type": {"type": "string", "enum":["dataplane", "bridge"]},
-                                "external" : boolean_schema,
-                                "graph": graph_schema
-                            },
-                            "required": ["interfaces"]
-                        },
-                    }
-                },
-
-            },
-            "required": ["vnfs", "name"],
-            "additionalProperties": False
-        }
-    },
-    "required": ["scenario","schema_version"],
-    "additionalProperties": False
-}
-
-#NSD schema for OSM R1
-nsd_schema_v03 = {
-    "title":"network scenario descriptor information schema v0.3",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "schema_version": {"type": "string", "enum": ["0.3"]},
-        "scenario":{
-            "type":"object",
-            "properties":{
-                "name": name_schema,
-                "description": description_schema,
-                "tenant_id": id_schema, #only valid for admin
-                "public": boolean_schema,
-                "cloud-config": cloud_config_schema, #common for all vnfs in the scenario
-                #"datacenter": name_schema,
-                "vnfs": {
-                    "type":"object",
-                    "patternProperties":{
-                        ".": {
-                            "type": "object",
-                            "properties":{
-                                "vnf_id": id_schema,
-                                "graph": graph_schema,
-                                "vnf_name": name_schema,
-                                #"cloud-config": cloud_config_schema, #particular for a vnf
-                                #"datacenter": name_schema,
-                                "internal-connections": {
-                                    "type": "object",
-                                    "patternProperties": {
-                                        ".": {
-                                            "type": "object",
-                                            "properties": {
-                                                "ip-profile": ip_profile_schema,
-                                                "elements": {
-                                                    "type" : "array",
-                                                    "items":{
-                                                        "type":"object",
-                                                        "properties":{
-                                                            "VNFC": name_schema,
-                                                            "local_iface_name": name_schema,
-                                                            "ip_address": ip_schema,
-                                                        },
-                                                        "required": ["VNFC", "local_iface_name"],
-                                                    }
-                                                }
-                                            }
-                                        }
-                                    }
-                                }
-                            },
-                        }
-                    },
-                    "minProperties": 1
-                },
-                "networks": {
-                    "type":"object",
-                    "patternProperties":{
-                        ".": {
-                            "type": "object",
-                            "properties":{
-                                "interfaces":{
-                                    "type":"array",
-                                    "minLength":1,
-                                    "items":{
-                                        "type":"object",
-                                        "properties":{
-                                            "vnf": name_schema,
-                                            "vnf_interface": name_schema,
-                                            "ip_address": ip_schema
-                                        },
-                                        "required": ["vnf", "vnf_interface"],
-                                    }
-                                },
-                                "type": {"type": "string", "enum":["e-line", "e-lan"]},
-                                "implementation": {"type": "string", "enum":["overlay", "underlay"]},
-                                "external" : boolean_schema,
-                                "graph": graph_schema,
-                                "ip-profile": ip_profile_schema
-                            },
-                            "required": ["interfaces"]
-                        },
-                    }
-                },
-
-            },
-            "required": ["vnfs", "networks","name"],
-            "additionalProperties": False
-        }
-    },
-    "required": ["scenario","schema_version"],
-    "additionalProperties": False
-}
-
-#scenario_new_schema = {
-#    "title":"new scenario information schema",
-#    "$schema": "http://json-schema.org/draft-04/schema#",
-#    #"oneOf": [nsd_schema_v01, nsd_schema_v02]
-#    "oneOf": [nsd_schema_v01]
-#}
-
-scenario_edit_schema = {
-    "title":"edit scenario information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "name":name_schema,
-        "description": description_schema,
-        "topology":{
-            "type":"object",
-            "properties":{
-                "nodes": {
-                    "type":"object",
-                    "patternProperties":{
-                        "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$": {
-                            "type":"object",
-                            "properties":{
-                                "graph":{
-                                    "type": "object",
-                                    "properties":{
-                                        "x": integer0_schema,
-                                        "y": integer0_schema,
-                                        "ifaces":{ "type": "object"}
-                                    }
-                                },
-                                "description": description_schema,
-                                "name": name_schema
-                            }
-                        }
-                    }
-                }
-            },
-            "required": ["nodes"],
-            "additionalProperties": False
-        }
-    },
-    "additionalProperties": False
-}
-
-scenario_action_schema = {
-    "title":"scenario action information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "start":{
-            "type": "object",
-            "properties": {
-                "instance_name":name_schema,
-                "description":description_schema,
-                "datacenter": {"type": "string"}
-            },
-            "required": ["instance_name"]
-        },
-        "deploy":{
-            "type": "object",
-            "properties": {
-                "instance_name":name_schema,
-                "description":description_schema,
-                "datacenter": {"type": "string"}
-            },
-            "required": ["instance_name"]
-        },
-        "reserve":{
-            "type": "object",
-            "properties": {
-                "instance_name":name_schema,
-                "description":description_schema,
-                "datacenter": {"type": "string"}
-            },
-            "required": ["instance_name"]
-        },
-        "verify":{
-            "type": "object",
-            "properties": {
-                "instance_name":name_schema,
-                "description":description_schema,
-                "datacenter": {"type": "string"}
-            },
-            "required": ["instance_name"]
-        }
-    },
-    "minProperties": 1,
-    "maxProperties": 1,
-    "additionalProperties": False
-}
-
-instance_scenario_object = {
-    "title": "scenario object used to create an instance not based on any nsd",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type": "object",
-    "properties": {
-        "nets": {
-            "type": "array",
-            "minLength": 1,
-            "items": {
-                "type": "object",
-                "properties": {
-                    "name": name_schema,
-                    "external": boolean_schema,
-                    "type": {"enum": ["bridge", "ptp", "data"]},  # for overlay, underlay E-LINE, underlay E-LAN
-                },
-                "additionalProperties": False,
-                "required": ["name", "external", "type"]
-            }
-        }
-    },
-    "additionalProperties": False,
-    "required": ["nets"]
-}
-
-instance_scenario_create_schema_v01 = {
-    "title": "instance scenario create information schema v0.1",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type": "object",
-    "properties": {
-        "schema_version": {"type": "string", "enum": ["0.1"]},
-        "instance": {
-            "type": "object",
-            "properties": {
-                "mgmt_keys": {"type": "array", "items": {"type":"string"}},
-                "vduImage": name_schema,
-                "name": name_schema,
-                "description":description_schema,
-                "datacenter": name_schema,
-                "wim_account": {"oneOf": [boolean_schema, id_schema, null_schema]},
-                "scenario" : {"oneOff": [name_schema, instance_scenario_object]},  # can be an UUID or name or a dict
-                "action":{"enum": ["deploy","reserve","verify" ]},
-                "connect_mgmt_interfaces": {"oneOf": [boolean_schema, {"type":"object"}]},# can be true or a dict with datacenter: net_name
-                "cloud-config": cloud_config_schema, #common to all vnfs in the instance scenario
-                "vnfs":{             #mapping from scenario to datacenter
-                    "type": "object",
-                    "patternProperties":{
-                        ".": {
-                            "type": "object",
-                            "properties":{
-                                "name": name_schema, #override vnf name
-                                "datacenter": name_schema,
-                                #"metadata": {"type": "object"},
-                                #"user_data": {"type": "string"}
-                                #"cloud-config": cloud_config_schema, #particular for a vnf
-                                "vdus": {
-                                    "type": "object",
-                                    "patternProperties": {
-                                        ".": {
-                                            "type": "object",
-                                            "properties": {
-                                                "name": name_schema,  # overrides vdu name schema
-                                                "mgmt_keys": {"type": "array", "items": {"type": "string"}},
-                                                "vduImage": name_schema,
-                                                "devices": {
-                                                    "type": "object",
-                                                    "patternProperties": {
-                                                        ".": {
-                                                            "vim_id": name_schema,
-                                                        }
-                                                    }
-                                                },
-                                                "interfaces": {
-                                                    "type": "object",
-                                                    "patternProperties": {
-                                                        ".": {
-                                                            "ip_address": ip_schema,
-                                                            "mac_address": mac_schema,
-                                                            "floating-ip": boolean_schema,
-                                                        }
-                                                    }
-                                                }
-                                            }
-                                        }
-                                    }
-                                },
-                                "networks": {
-                                    "type": "object",
-                                    "patternProperties": {
-                                        ".": {
-                                            "type": "object",
-                                            "properties": {
-                                                "vim-network-name": name_schema,
-                                                "vim-network-id": name_schema,
-                                                "ip-profile": ip_profile_schema,
-                                                "name": name_schema,
-                                            }
-                                        }
-                                    }
-                                },
-                            }
-                        }
-                    },
-                },
-                "networks":{             #mapping from scenario to datacenter
-                    "type": "object",
-                    "patternProperties":{
-                        ".": {
-                            "type": "object",
-                            "properties":{
-                                "interfaces":{
-                                    "type":"array",
-                                    "minLength":1,
-                                    "items":{
-                                        "type":"object",
-                                        "properties":{
-                                            "ip_address": ip_schema,
-                                            "datacenter": name_schema,
-                                            "vim-network-name": name_schema,
-                                            "vim-network-id": name_schema
-                                        },
-                                        "patternProperties":{
-                                            ".": {"type": "string"}
-                                        }
-                                    }
-                                },
-                                "wim_account": {"oneOf": [boolean_schema, id_schema, null_schema]},
-                                "ip-profile": ip_profile_schema,
-                                "use-network": {
-                                    "type": "object",
-                                    "properties": {
-                                        "instance_scenario_id": id_schema,
-                                        # "member_vnf_index": name_schema,  # if not null, network inside VNF
-                                        "osm_id": name_schema,  # sce_network osm_id or name
-                                    },
-                                    "additionalProperties": False,
-                                    "required": ["instance_scenario_id", "osm_id"]
-                                },
-                                #if the network connects VNFs deployed at different sites, you must specify one entry per site that this network connect to
-                                "sites": {
-                                    "type":"array",
-                                    "minLength":1,
-                                    "items":{
-                                        "type":"object",
-                                        "properties":{
-                                            # By default for an scenario 'external' network openmano looks for an existing VIM network to map this external scenario network,
-                                            # for other networks openamno creates at VIM
-                                            # Use netmap-create to force to create an external scenario network
-                                            "netmap-create": {"oneOf":[name_schema,null_schema]}, #datacenter network to use. Null if must be created as an internal net
-                                            #netmap-use:   Indicates an existing VIM network that must be used for this scenario network.
-                                            #Can use both the VIM network name (if it is not ambiguous) or the VIM net UUID
-                                            #If both 'netmap-create' and 'netmap-use'are supplied, netmap-use precedes, but if fails openmano follows the netmap-create
-                                            #In oder words, it is the same as 'try to map to the VIM network (netmap-use) if exist, and if not create the network (netmap-create)
-                                            "netmap-use": name_schema, #
-                                            "vim-network-name": name_schema, #override network name
-                                            "vim-network-id": name_schema,
-                                            #"ip-profile": ip_profile_schema,
-                                            "datacenter": name_schema,
-                                        }
-                                    }
-                                },
-                            }
-                        }
-                    },
-                },
-            },
-            "additionalProperties": False,
-            "required": ["name"]
-        },
-    },
-    "required": ["instance"],
-    "additionalProperties": False
-}
-
-instance_scenario_action_schema = {
-    "title": "instance scenario action information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type": "object",
-    "properties": {
-        "start": null_schema,
-        "pause": null_schema,
-        "resume": null_schema,
-        "shutoff": null_schema,
-        "shutdown": null_schema,
-        "forceOff": null_schema,
-        "rebuild": null_schema,
-        "reboot": {
-            "type": ["object", "null"],
-        },
-        "add_public_key": {"type" : "string"},
-        "user": nameshort_schema,
-        "console": {"type": ["string", "null"], "enum": ["novnc", "xvpvnc", "rdp-html5", "spice-html5", None]},
-        "vdu-scaling": {
-            "type": "array",
-            "items": {
-                "type": "object",
-                "properties": {
-                    "vdu-id": id_schema,
-                    "osm_vdu_id": name_schema,
-                    "member-vnf-index": name_schema,
-                    "count": integer1_schema,
-                    "type": {"enum": ["create", "delete"]}
-                },
-                "additionalProperties": False,
-                "minProperties": 1,
-                "required": ["type"]
-            }
-        },
-        "vnfs": {"type": "array", "items": {"type": "string"}},
-        "vms": {"type": "array", "items": {"type": "string"}}
-    },
-    "minProperties": 1,
-    #"maxProperties": 1,
-    "additionalProperties": False
-}
-
-sdn_controller_properties={
-    "name": name_schema,
-    "dpid": {"type":"string", "pattern":"^[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){7}$"},
-    "ip": ip_schema,
-    "port": port_schema,
-    "type": {"type": "string", "enum": ["opendaylight","floodlight","onos"]},
-    "version": {"type" : "string", "minLength":1, "maxLength":12},
-    "user": nameshort_schema,
-    "password": passwd_schema
-}
-sdn_controller_schema = {
-    "title":"sdn controller information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "sdn_controller":{
-            "type":"object",
-            "properties":sdn_controller_properties,
-            "required": ["name", "port", 'ip', 'dpid', 'type'],
-            "additionalProperties": False
-        }
-    },
-    "required": ["sdn_controller"],
-    "additionalProperties": False
-}
-
-sdn_controller_edit_schema = {
-    "title":"sdn controller update information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "sdn_controller":{
-            "type":"object",
-            "properties":sdn_controller_properties,
-            "additionalProperties": False
-        }
-    },
-    "required": ["sdn_controller"],
-    "additionalProperties": False
-}
-
-sdn_port_mapping_schema  = {
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "title":"sdn port mapping information schema",
-    "type": "object",
-    "properties": {
-        "sdn_port_mapping": {
-            "type": "array",
-            "items": {
-                "type": "object",
-                "properties": {
-                    "compute_node": nameshort_schema,
-                    "ports": {
-                        "type": "array",
-                        "items": {
-                            "type": "object",
-                            "properties": {
-                                "pci": {"OneOf": [null_schema, pci_extended_schema]},       # pci_schema,
-                                "switch_port": nameshort_schema,
-                                "switch_mac": mac_schema
-                            },
-                            "required": ["pci"]
-                        }
-                    }
-                },
-                "required": ["compute_node", "ports"]
-            }
-        }
-    },
-    "required": ["sdn_port_mapping"]
-}
-
-sdn_external_port_schema = {
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "title":"External port ingformation",
-    "type": "object",
-    "properties": {
-        "port": {"type" : "string", "minLength":1, "maxLength":60},
-        "vlan": vlan_schema,
-        "mac": mac_schema
-    },
-    "required": ["port"]
-}
diff --git a/osm_ro/openmanoclient.py b/osm_ro/openmanoclient.py
deleted file mode 100644 (file)
index e15824a..0000000
+++ /dev/null
@@ -1,1223 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-"""
-openmano python client used to interact with openmano-server
-"""
-
-import requests
-import json
-import yaml
-import logging
-import sys
-
-__author__ = "Alfonso Tierno, Pablo Montes"
-__date__ = "$09-Mar-2016 09:09:48$"
-__version__ = "0.1.0-r470"
-version_date = "Oct 2017"
-
-if sys.version_info.major == 3:
-    from urllib.parse import quote
-elif sys.version_info.major == 2:
-    from urllib import quote
-
-class OpenmanoException(Exception):
-    '''Common Exception for all openmano client exceptions'''
-
-class OpenmanoBadParamsException(OpenmanoException):
-    '''Bad or missing input parameters'''
-
-class OpenmanoResponseException(OpenmanoException):
-    '''Unexpected response from openmano server'''
-
-class OpenmanoNotFoundException(OpenmanoException):
-    '''Not found at server'''
-
-# class vnf():
-#     def __init__(self, message):
-#         print "Error: %s" %message
-#         print
-#         self.print_usage()
-#         #self.print_help()
-#         print
-#         print "Type 'openmano -h' for help"
-
-class openmanoclient():
-    headers_req = {'Accept': 'application/yaml', 'content-type': 'application/yaml'}
-    
-    def __init__(self, **kwargs):
-        self.username = kwargs.get("username")
-        self.password = kwargs.get("password")
-        self.endpoint_url = kwargs.get("endpoint_url")
-        self.tenant_id = kwargs.get("tenant_id")
-        self.tenant_name = kwargs.get("tenant_name")
-        self.tenant = None
-        self.datacenter_id = kwargs.get("datacenter_id")
-        self.datacenter_name = kwargs.get("datacenter_name")
-        self.datacenter = None
-        self.logger = logging.getLogger(kwargs.get('logger','manoclient'))
-        if kwargs.get("debug"):
-            self.logger.setLevel(logging.DEBUG)
-        
-    def __getitem__(self, index):
-        if index=='tenant_name':
-            return self.tenant_name
-        elif index=='tenant_id':
-            return self.tenant_id
-        elif index=='datacenter_name':
-            return self.datacenter_name
-        elif index=='datacenter_id':
-            return self.datacenter_id
-        elif index=='username':
-            return self.username
-        elif index=='password':
-            return self.password
-        elif index=='endpoint_url':
-            return self.endpoint_url
-        else:
-            raise KeyError("Invalid key '%s'" %str(index))
-        
-    def __setitem__(self,index, value):
-        if index=='tenant_name':
-            self.tenant_name = value
-        elif index=='tenant_id':
-            self.tenant_id = value
-        elif index=='datacenter_name':
-            self.datacenter_name = value
-        elif index=='datacenter_id':
-            self.datacenter_id = value
-        elif index=='username':
-            self.username = value
-        elif index=='password':
-            self.password = value
-        elif index=='endpoint_url':
-            self.endpoint_url = value
-        else:
-            raise KeyError("Invalid key '%s'" %str(index)) 
-        self.tenant = None # force to reload tenant with different credentials
-        self.datacenter = None # force to reload datacenter with different credentials
-    
-    def _parse(self, descriptor, descriptor_format, response=False):
-        #try yaml
-        if descriptor_format and descriptor_format != "json" and descriptor_format != "yaml":
-            raise  OpenmanoBadParamsException("'descriptor_format' must be a 'json' or 'yaml' text")
-        if descriptor_format != "json":
-            try:
-                return yaml.load(descriptor)
-            except yaml.YAMLError as exc:
-                error_pos = ""
-                if hasattr(exc, 'problem_mark'):
-                    mark = exc.problem_mark
-                    error_pos = " at line:{} column:{}s".format(mark.line+1, mark.column+1)
-                error_text = "yaml format error" + error_pos
-        elif descriptor_format != "yaml":
-            try:
-                return json.loads(descriptor) 
-            except Exception as e:
-                if response:
-                    error_text = "json format error" + str(e)
-
-        if response:
-            raise OpenmanoResponseException(error_text)
-        raise  OpenmanoBadParamsException(error_text)
-    
-    def _parse_yaml(self, descriptor, response=False):
-        try:
-            return yaml.load(descriptor)
-        except yaml.YAMLError as exc:
-            error_pos = ""
-            if hasattr(exc, 'problem_mark'):
-                mark = exc.problem_mark
-                error_pos = " at line:{} column:{}s".format(mark.line+1, mark.column+1)
-            error_text = "yaml format error" + error_pos
-            if response:
-                raise OpenmanoResponseException(error_text)
-            raise  OpenmanoBadParamsException(error_text)
-
-    
-    def _get_item_uuid(self, item, item_id=None, item_name=None, all_tenants=False):
-        if all_tenants == None:
-            tenant_text = ""
-        elif all_tenants == False:
-            tenant_text = "/" + self.tenant
-        else:
-            tenant_text = "/any"
-        URLrequest = "{}{}/{}".format(self.endpoint_url, tenant_text, item)
-        self.logger.debug("GET %s", URLrequest )
-        mano_response = requests.get(URLrequest, headers=self.headers_req)
-        self.logger.debug("openmano response: %s", mano_response.text )
-        content = self._parse_yaml(mano_response.text, response=True)
-        #print content
-        found = 0
-        if not item_id and not item_name:
-            raise OpenmanoResponseException("Missing either {0}_name or {0}_id".format(item[:-1]))
-        for i in content[item]:
-            if item_id and i["uuid"] == item_id:
-                return item_id
-            elif item_name and i["name"] == item_name:
-                uuid = i["uuid"]
-                found += 1
-            
-        if found == 0:
-            if item_id:
-                raise OpenmanoNotFoundException("No {} found with id '{}'".format(item[:-1], item_id))
-            else:
-                #print(item, item_name)
-                raise OpenmanoNotFoundException("No {} found with name '{}'".format(item[:-1], item_name) )
-        elif found > 1:
-            raise OpenmanoNotFoundException("{} {} found with name '{}'. uuid must be used".format(found, item, item_name))
-        return uuid
-
-    def _get_item(self, item, uuid=None, name=None, all_tenants=False):
-        if all_tenants:
-            tenant_text = "/any"
-        elif all_tenants==None:
-            tenant_text = ""
-        else:
-            tenant_text = "/"+self._get_tenant()
-        if not uuid:
-            #check that exist
-            uuid = self._get_item_uuid(item, uuid, name, all_tenants)
-        
-        URLrequest = "{}{}/{}/{}".format(self.endpoint_url, tenant_text, item, uuid)
-        self.logger.debug("GET %s", URLrequest )
-        mano_response = requests.get(URLrequest, headers=self.headers_req)
-        self.logger.debug("openmano response: %s", mano_response.text )
-    
-        content = self._parse_yaml(mano_response.text, response=True)
-        if mano_response.status_code==200:
-            return content
-        else:
-            raise OpenmanoResponseException(str(content))        
-
-    def _get_tenant(self):
-        if not self.tenant:
-            self.tenant = self._get_item_uuid("tenants", self.tenant_id, self.tenant_name, None)
-        return self.tenant
-    
-    def _get_datacenter(self):
-        if not self.tenant:
-            self._get_tenant()
-        if not self.datacenter:
-            self.datacenter = self._get_item_uuid("datacenters", self.datacenter_id, self.datacenter_name, False)
-        return self.datacenter
-
-    def _create_item(self, item, descriptor, all_tenants=False, api_version=None):
-        if all_tenants:
-            tenant_text = "/any"
-        elif all_tenants is None:
-            tenant_text = ""
-        else:
-            tenant_text = "/"+self._get_tenant()
-        payload_req = yaml.safe_dump(descriptor)
-
-        api_version_text = ""
-        if api_version:
-            api_version_text = "/v3"
-            
-        #print payload_req
-            
-        URLrequest = "{}{apiver}{tenant}/{item}".format(self.endpoint_url, apiver=api_version_text, tenant=tenant_text,
-                                                        item=item)
-        self.logger.debug("openmano POST %s %s", URLrequest, payload_req)
-        mano_response = requests.post(URLrequest, headers=self.headers_req, data=payload_req)
-        self.logger.debug("openmano response: %s", mano_response.text)
-    
-        content = self._parse_yaml(mano_response.text, response=True)
-        if mano_response.status_code == 200:
-            return content
-        else:
-            raise OpenmanoResponseException(str(content))        
-
-    def _del_item(self, item, uuid=None, name=None, all_tenants=False):
-        if all_tenants:
-            tenant_text = "/any"
-        elif all_tenants==None:
-            tenant_text = ""
-        else:
-            tenant_text = "/"+self._get_tenant()
-        if not uuid:
-            #check that exist
-            uuid = self._get_item_uuid(item, uuid, name, all_tenants)
-        
-        URLrequest = "{}{}/{}/{}".format(self.endpoint_url, tenant_text, item, uuid)
-        self.logger.debug("DELETE %s", URLrequest )
-        mano_response = requests.delete(URLrequest, headers = self.headers_req)
-        self.logger.debug("openmano response: %s", mano_response.text )
-    
-        content = self._parse_yaml(mano_response.text, response=True)
-        if mano_response.status_code==200:
-            return content
-        else:
-            raise OpenmanoResponseException(str(content))        
-    
-    def _list_item(self, item, all_tenants=False, filter_dict=None):
-        if all_tenants:
-            tenant_text = "/any"
-        elif all_tenants==None:
-            tenant_text = ""
-        else:
-            tenant_text = "/"+self._get_tenant()
-        
-        URLrequest = "{}{}/{}".format(self.endpoint_url, tenant_text, item)
-        separator="?"
-        if filter_dict:
-            for k in filter_dict:
-                URLrequest += separator + quote(str(k)) + "=" + quote(str(filter_dict[k])) 
-                separator = "&"
-        self.logger.debug("openmano GET %s", URLrequest)
-        mano_response = requests.get(URLrequest, headers=self.headers_req)
-        self.logger.debug("openmano response: %s", mano_response.text )
-    
-        content = self._parse_yaml(mano_response.text, response=True)
-        if mano_response.status_code==200:
-            return content
-        else:
-            raise OpenmanoResponseException(str(content))        
-
-    def _edit_item(self, item, descriptor, uuid=None, name=None, all_tenants=False):
-        if all_tenants:
-            tenant_text = "/any"
-        elif all_tenants==None:
-            tenant_text = ""
-        else:
-            tenant_text = "/"+self._get_tenant()
-
-        if not uuid:
-            #check that exist
-            uuid = self._get_item_uuid("tenants", uuid, name, all_tenants)
-        
-        payload_req = yaml.safe_dump(descriptor)
-            
-        #print payload_req
-            
-        URLrequest = "{}{}/{}/{}".format(self.endpoint_url, tenant_text, item, uuid)
-        self.logger.debug("openmano PUT %s %s", URLrequest, payload_req)
-        mano_response = requests.put(URLrequest, headers = self.headers_req, data=payload_req)
-        self.logger.debug("openmano response: %s", mano_response.text )
-    
-        content = self._parse_yaml(mano_response.text, response=True)
-        if mano_response.status_code==200:
-            return content
-        else:
-            raise OpenmanoResponseException(str(content))        
-
-    #TENANTS
-    def list_tenants(self, **kwargs):
-        '''Obtain a list of tenants
-        Params: can be filtered by 'uuid','name','description'
-        Return: Raises an exception on error
-                Obtain a dictionary with format {'tenants':[{tenant1_info},{tenant2_info},...]}}
-        '''
-        return self._list_item("tenants", all_tenants=None, filter_dict=kwargs)
-
-    def get_tenant(self, uuid=None, name=None):
-        '''Obtain the information of a tenant
-        Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
-        Return: Raises an exception on error, not found, found several
-                Obtain a dictionary with format {'tenant':{tenant_info}}
-        '''
-        return self._get_item("tenants", uuid, name, all_tenants=None)
-
-    def delete_tenant(self, uuid=None, name=None):
-        '''Delete a tenant
-        Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
-        Return: Raises an exception on error, not found, found several
-                Obtain a dictionary with format {'result': text indicating deleted}
-        '''
-        return self._del_item("tenants", uuid, name, all_tenants=None)
-
-    def create_tenant(self, descriptor=None, descriptor_format=None, name=None, description=None):
-        '''Creates a tenant
-        Params: must supply a descriptor or/and just a name
-            descriptor: with format {'tenant':{new_tenant_info}}
-                newtenant_info must contain 'name', and optionally 'description'
-                must be a dictionary or a json/yaml text.
-            name: the tenant name. Overwrite descriptor name if any
-            description: tenant descriptor.. Overwrite descriptor description if any
-        Return: Raises an exception on error
-                Obtain a dictionary with format {'tenant':{new_tenant_info}}
-        '''
-        if isinstance(descriptor, str):
-            descriptor = self._parse(descriptor, descriptor_format)
-        elif descriptor:
-            pass
-        elif name:
-            descriptor={"tenant": {"name": name}}
-        else:
-            raise OpenmanoBadParamsException("Missing descriptor")
-
-        if 'tenant' not in descriptor or len(descriptor)!=1:
-            raise OpenmanoBadParamsException("Descriptor must contain only one 'tenant' field")
-        if name:
-            descriptor['tenant']['name'] = name
-        if description:
-            descriptor['tenant']['description'] = description
-
-        return self._create_item("tenants", descriptor, all_tenants=None)
-
-    def edit_tenant(self, uuid=None, name=None, descriptor=None, descriptor_format=None, new_name=None, new_description=None):
-        '''Edit the parameters of a tenant
-        Params: must supply a descriptor or/and a new_name or new_description
-            uuid or/and name. If only name is supplied, there must be only one or an exception is raised
-            descriptor: with format {'tenant':{params to change info}}
-                must be a dictionary or a json/yaml text.
-            name: the tenant name. Overwrite descriptor name if any
-            description: tenant descriptor.. Overwrite descriptor description if any
-        Return: Raises an exception on error, not found or found several
-                Obtain a dictionary with format {'tenant':{newtenant_info}}
-        '''
-
-        if isinstance(descriptor, str):
-            descriptor = self.parse(descriptor, descriptor_format)
-        elif descriptor:
-            pass
-        elif new_name or new_description:
-            descriptor={"tenant": {}}
-        else:
-            raise OpenmanoBadParamsException("Missing descriptor")
-
-        if 'tenant' not in descriptor or len(descriptor)!=1:
-            raise OpenmanoBadParamsException("Descriptor must contain only one 'tenant' field")
-        if new_name:
-            descriptor['tenant']['name'] = new_name
-        if new_description:
-            descriptor['tenant']['description'] = new_description
-
-        return self._edit_item("tenants", descriptor, uuid, name, all_tenants=None)
-
-    #DATACENTERS
-
-    def list_datacenters(self, all_tenants=False, **kwargs):
-        '''Obtain a list of datacenters, that are the VIM information at openmano
-        Params: can be filtered by 'uuid','name','vim_url','type'
-        Return: Raises an exception on error
-                Obtain a dictionary with format {'datacenters':[{datacenter1_info},{datacenter2_info},...]}}
-        '''
-        return self._list_item("datacenters", all_tenants, filter_dict=kwargs)
-
-    def get_datacenter(self, uuid=None, name=None, all_tenants=False):
-        '''Obtain the information of a datacenter
-        Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
-        Return: Raises an exception on error, not found, found several
-                Obtain a dictionary with format {'datacenter':{datacenter_info}}
-        '''
-        return self._get_item("datacenters", uuid, name, all_tenants)
-
-    def delete_datacenter(self, uuid=None, name=None):
-        '''Delete a datacenter
-        Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
-        Return: Raises an exception on error, not found, found several, not free
-                Obtain a dictionary with format {'result': text indicating deleted}
-        '''
-        if not uuid:
-            # check that exist
-            uuid = self._get_item_uuid("datacenters", uuid, name, all_tenants=True)
-        return self._del_item("datacenters", uuid, name, all_tenants=None)
-
-    def create_datacenter(self, descriptor=None, descriptor_format=None, name=None, vim_url=None, **kwargs):
-#, type="openvim", public=False, description=None):
-        '''Creates a datacenter
-        Params: must supply a descriptor or/and just a name and vim_url
-            descriptor: with format {'datacenter':{new_datacenter_info}}
-                newdatacenter_info must contain 'name', 'vim_url', and optionally 'description'
-                must be a dictionary or a json/yaml text.
-            name: the datacenter name. Overwrite descriptor name if any
-            vim_url: the datacenter URL. Overwrite descriptor vim_url if any
-            vim_url_admin: the datacenter URL for administrative issues. Overwrite descriptor vim_url if any
-            vim_type: the datacenter type, can be openstack or openvim. Overwrite descriptor type if any
-            public: boolean, by default not public
-            description: datacenter description. Overwrite descriptor description if any
-            config: dictionary with extra configuration for the concrete datacenter
-        Return: Raises an exception on error
-                Obtain a dictionary with format {'datacenter':{new_datacenter_info}}
-        '''
-        if isinstance(descriptor, str):
-            descriptor = self.parse(descriptor, descriptor_format)
-        elif descriptor:
-            pass
-        elif name and vim_url:
-            descriptor={"datacenter": {"name": name, "vim_url": vim_url}}
-        else:
-            raise OpenmanoBadParamsException("Missing descriptor, or name and vim_url")
-        
-        if 'datacenter' not in descriptor or len(descriptor)!=1:
-            raise OpenmanoBadParamsException("Descriptor must contain only one 'datacenter' field")
-        if name:
-            descriptor['datacenter']['name'] = name
-        if vim_url:
-            descriptor['datacenter']['vim_url'] = vim_url
-        for param in kwargs:
-            descriptor['datacenter'][param] = kwargs[param]
-
-        return self._create_item("datacenters", descriptor, all_tenants=None)
-
-    def edit_datacenter(self, uuid=None, name=None, descriptor=None, descriptor_format=None, all_tenants=False, **kwargs):
-        '''Edit the parameters of a datacenter
-        Params: must supply a descriptor or/and a parameter to change
-            uuid or/and name. If only name is supplied, there must be only one or an exception is raised
-            descriptor: with format {'datacenter':{params to change info}}
-                must be a dictionary or a json/yaml text.
-            parameters to change can be supplyied by the descriptor or as parameters:
-                new_name: the datacenter name
-                vim_url: the datacenter URL
-                vim_url_admin: the datacenter URL for administrative issues
-                vim_type: the datacenter type, can be openstack or openvim.
-                public: boolean, available to other tenants
-                description: datacenter description
-        Return: Raises an exception on error, not found or found several
-                Obtain a dictionary with format {'datacenter':{new_datacenter_info}}
-        '''
-
-        if isinstance(descriptor, str):
-            descriptor = self.parse(descriptor, descriptor_format)
-        elif descriptor:
-            pass
-        elif kwargs:
-            descriptor={"datacenter": {}}
-        else:
-            raise OpenmanoBadParamsException("Missing descriptor")
-
-        if 'datacenter' not in descriptor or len(descriptor)!=1:
-            raise OpenmanoBadParamsException("Descriptor must contain only one 'datacenter' field")
-        for param in kwargs:
-            if param=='new_name':
-                descriptor['datacenter']['name'] = kwargs[param]
-            else:
-                descriptor['datacenter'][param] = kwargs[param]
-        return self._edit_item("datacenters", descriptor, uuid, name, all_tenants=None)
-    
-    def attach_datacenter(self, uuid=None, name=None, descriptor=None, descriptor_format=None,  vim_user=None, vim_password=None, vim_tenant_name=None, vim_tenant_id=None):
-        #check that exist
-        uuid = self._get_item_uuid("datacenters", uuid, name, all_tenants=True)
-        tenant_text = "/"+self._get_tenant()
-
-        if isinstance(descriptor, str):
-            descriptor = self.parse(descriptor, descriptor_format)
-        elif descriptor:
-            pass
-        elif vim_user or vim_password or vim_tenant_name or vim_tenant_id:
-            descriptor={"datacenter": {}}
-        else:
-            raise OpenmanoBadParamsException("Missing descriptor or params")
-        
-        if vim_user or vim_password or vim_tenant_name or vim_tenant_id:
-            #print args.name
-            try:
-                if vim_user:
-                    descriptor['datacenter']['vim_user'] = vim_user
-                if vim_password:
-                    descriptor['datacenter']['vim_password'] = vim_password
-                if vim_tenant_name:
-                    descriptor['datacenter']['vim_tenant_name'] = vim_tenant_name
-                if vim_tenant_id:
-                    descriptor['datacenter']['vim_tenant'] = vim_tenant_id
-            except (KeyError, TypeError) as e:
-                if str(e)=='datacenter':           error_pos= "missing field 'datacenter'"
-                else:                       error_pos="wrong format"
-                raise OpenmanoBadParamsException("Wrong datacenter descriptor: " + error_pos)
-
-        payload_req = yaml.safe_dump(descriptor)
-        #print payload_req
-        URLrequest = "{}{}/datacenters/{}".format(self.endpoint_url, tenant_text, uuid)
-        self.logger.debug("openmano POST %s %s", URLrequest, payload_req)
-        mano_response = requests.post(URLrequest, headers = self.headers_req, data=payload_req)
-        self.logger.debug("openmano response: %s", mano_response.text )
-    
-        content = self._parse_yaml(mano_response.text, response=True)
-        if mano_response.status_code==200:
-            return content
-        else:
-            raise OpenmanoResponseException(str(content))        
-
-    def detach_datacenter(self, uuid=None, name=None):
-        if not uuid:
-            #check that exist
-            uuid = self._get_item_uuid("datacenters", uuid, name, all_tenants=False)
-        tenant_text = "/"+self._get_tenant()
-        URLrequest = "{}{}/datacenters/{}".format(self.endpoint_url, tenant_text, uuid)
-        self.logger.debug("openmano DELETE %s", URLrequest)
-        mano_response = requests.delete(URLrequest, headers = self.headers_req)
-        self.logger.debug("openmano response: %s", mano_response.text )
-    
-        content = self._parse_yaml(mano_response.text, response=True)
-        if mano_response.status_code==200:
-            return content
-        else:
-            raise OpenmanoResponseException(str(content))
-
-    # WIMS
-
-    def list_wims(self, all_tenants=False, **kwargs):
-        '''Obtain a list of wims, that are the WIM information at openmano
-        Params: can be filtered by 'uuid','name','wim_url','type'
-        Return: Raises an exception on error
-                Obtain a dictionary with format {'wims':[{wim1_info},{wim2_info},...]}}
-        '''
-        return self._list_item("wims", all_tenants, filter_dict=kwargs)
-
-    def get_wim(self, uuid=None, name=None, all_tenants=False):
-        '''Obtain the information of a wim
-        Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
-        Return: Raises an exception on error, not found, found several
-                Obtain a dictionary with format {'wim':{wim_info}}
-        '''
-        return self._get_item("wims", uuid, name, all_tenants)
-
-    def delete_wim(self, uuid=None, name=None):
-        '''Delete a wim
-        Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
-        Return: Raises an exception on error, not found, found several, not free
-                Obtain a dictionary with format {'result': text indicating deleted}
-        '''
-        if not uuid:
-            # check that exist
-            uuid = self._get_item_uuid("wims", uuid, name, all_tenants=True)
-        return self._del_item("wims", uuid, name, all_tenants=None)
-
-    def create_wim(self, descriptor=None, descriptor_format=None, name=None, wim_url=None, **kwargs):
-        # , type="openvim", public=False, description=None):
-        '''Creates a wim
-        Params: must supply a descriptor or/and just a name and a wim_url
-            descriptor: with format {'wim':{new_wim_info}}
-                new_wim_info must contain 'name', 'wim_url', and optionally 'description'
-                must be a dictionary or a json/yaml text.
-            name: the wim name. Overwrite descriptor name if any
-            wim_url: the wim URL. Overwrite descriptor vim_url if any
-            wim_type: the WIM type, can be tapi, odl, onos. Overwrite descriptor type if any
-            public: boolean, by default not public
-            description: wim description. Overwrite descriptor description if any
-            config: dictionary with extra configuration for the concrete wim
-        Return: Raises an exception on error
-                Obtain a dictionary with format {'wim:{new_wim_info}}
-        '''
-        if isinstance(descriptor, str):
-            descriptor = self.parse(descriptor, descriptor_format)
-        elif descriptor:
-            pass
-        elif name and wim_url:
-            descriptor = {"wim": {"name": name, "wim_url": wim_url}}
-        else:
-            raise OpenmanoBadParamsException("Missing descriptor, or name and wim_url")
-
-        if 'wim' not in descriptor or len(descriptor) != 1:
-            raise OpenmanoBadParamsException("Descriptor must contain only one 'wim' field")
-        if name:
-            descriptor['wim']['name'] = name
-        if wim_url:
-            descriptor['wim']['wim_url'] = wim_url
-        for param in kwargs:
-            descriptor['wim'][param] = kwargs[param]
-
-        return self._create_item("wims", descriptor, all_tenants=None)
-
-    def edit_wim(self, uuid=None, name=None, descriptor=None, descriptor_format=None, all_tenants=False,
-                        **kwargs):
-        '''Edit the parameters of a wim
-        Params: must supply a descriptor or/and a parameter to change
-            uuid or/and name. If only name is supplied, there must be only one or an exception is raised
-            descriptor: with format {'wim':{params to change info}}
-                must be a dictionary or a json/yaml text.
-            parameters to change can be supplied by the descriptor or as parameters:
-                new_name: the wim name
-                wim_url: the wim URL
-                wim_type: the wim type, can be tapi, onos, odl
-                public: boolean, available to other tenants
-                description: wim description
-        Return: Raises an exception on error, not found or found several
-                Obtain a dictionary with format {'wim':{new_wim_info}}
-        '''
-        if isinstance(descriptor, str):
-            descriptor = self.parse(descriptor, descriptor_format)
-        elif descriptor:
-            pass
-        elif kwargs:
-            descriptor = {"wim": {}}
-        else:
-            raise OpenmanoBadParamsException("Missing descriptor")
-
-        if 'wim' not in descriptor or len(descriptor) != 1:
-            raise OpenmanoBadParamsException("Descriptor must contain only one 'wim' field")
-        for param in kwargs:
-            if param == 'new_name':
-                descriptor['wim']['name'] = kwargs[param]
-            else:
-                descriptor['wim'][param] = kwargs[param]
-        return self._edit_item("wims", descriptor, uuid, name, all_tenants=None)
-
-    def attach_wim(self, uuid=None, name=None, descriptor=None, descriptor_format=None, wim_user=None,
-                          wim_password=None, wim_tenant_name=None, wim_tenant_id=None):
-        # check that exist
-        uuid = self._get_item_uuid("wims", uuid, name, all_tenants=True)
-        tenant_text = "/" + self._get_tenant()
-
-        if isinstance(descriptor, str):
-            descriptor = self.parse(descriptor, descriptor_format)
-        elif descriptor:
-            pass
-        elif wim_user or wim_password or wim_tenant_name or wim_tenant_id:
-            descriptor = {"wim": {}}
-        else:
-            raise OpenmanoBadParamsException("Missing descriptor or params")
-
-        if wim_user or wim_password or wim_tenant_name or wim_tenant_id:
-            # print args.name
-            try:
-                if wim_user:
-                    descriptor['wim']['wim_user'] = wim_user
-                if wim_password:
-                    descriptor['wim']['wim_password'] = wim_password
-                if wim_tenant_name:
-                    descriptor['wim']['wim_tenant_name'] = wim_tenant_name
-                if wim_tenant_id:
-                    descriptor['wim']['wim_tenant'] = wim_tenant_id
-            except (KeyError, TypeError) as e:
-                if str(e) == 'wim':
-                    error_pos = "missing field 'wim'"
-                else:
-                    error_pos = "wrong format"
-                raise OpenmanoBadParamsException("Wrong wim descriptor: " + error_pos)
-
-        payload_req = yaml.safe_dump(descriptor)
-        # print payload_req
-        URLrequest = "{}{}/wims/{}".format(self.endpoint_url, tenant_text, uuid)
-        self.logger.debug("openmano POST %s %s", URLrequest, payload_req)
-        mano_response = requests.post(URLrequest, headers=self.headers_req, data=payload_req)
-        self.logger.debug("openmano response: %s", mano_response.text)
-
-        content = self._parse_yaml(mano_response.text, response=True)
-        if mano_response.status_code == 200:
-            return content
-        else:
-            raise OpenmanoResponseException(str(content))
-
-    def detach_wim(self, uuid=None, name=None):
-        if not uuid:
-            # check that exist
-            uuid = self._get_item_uuid("wims", uuid, name, all_tenants=False)
-        tenant_text = "/" + self._get_tenant()
-        URLrequest = "{}{}/wims/{}".format(self.endpoint_url, tenant_text, uuid)
-        self.logger.debug("openmano DELETE %s", URLrequest)
-        mano_response = requests.delete(URLrequest, headers=self.headers_req)
-        self.logger.debug("openmano response: %s", mano_response.text)
-
-        content = self._parse_yaml(mano_response.text, response=True)
-        if mano_response.status_code == 200:
-            return content
-        else:
-            raise OpenmanoResponseException(str(content))
-
-    #VNFS
-    def list_vnfs(self, all_tenants=False, **kwargs):
-        '''Obtain a list of vnfs
-        Params: can be filtered by 'uuid','name','description','public', "tenant_id"
-        Return: Raises an exception on error
-                Obtain a dictionary with format {'vnfs':[{vnf1_info},{vnf2_info},...]}}
-        '''
-        return self._list_item("vnfs", all_tenants, kwargs)
-
-    def get_vnf(self, uuid=None, name=None, all_tenants=False):
-        '''Obtain the information of a vnf
-        Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
-        Return: Raises an exception on error, not found, found several
-                Obtain a dictionary with format {'vnf':{vnf_info}}
-        '''
-        return self._get_item("vnfs", uuid, name, all_tenants)
-
-    def delete_vnf(self, uuid=None, name=None, all_tenants=False):
-        '''Delete a vnf
-        Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
-        Return: Raises an exception on error, not found, found several, not free
-                Obtain a dictionary with format {'result': text indicating deleted}
-        '''
-        return self._del_item("vnfs", uuid, name, all_tenants)
-
-    def create_vnf(self, descriptor=None, descriptor_format=None, **kwargs):
-        '''Creates a vnf
-        Params: must supply a descriptor
-            descriptor: with format {'vnf':{new_vnf_info}}
-                must be a dictionary or a json/yaml text.
-                must be a dictionary or a json/yaml text.
-            Other parameters can be:
-                #TODO, revise
-                name: the vnf name. Overwrite descriptor name if any
-                image_path: Can be a string or a string list. Overwrite the image_path at descriptor
-                description: vnf descriptor.. Overwrite descriptor description if any
-                public: boolean, available to other tenants
-                class: user text for vnf classification
-                tenant_id: Propietary tenant
-                ...
-        Return: Raises an exception on error
-                Obtain a dictionary with format {'vnf':{new_vnf_info}}
-        '''
-        if isinstance(descriptor, str):
-            descriptor = self.parse(descriptor, descriptor_format)
-        elif descriptor:
-            pass
-        else:
-            raise OpenmanoBadParamsException("Missing descriptor")
-
-        try:
-            if "vnfd:vnfd-catalog" in descriptor or "vnfd-catalog" in descriptor:
-                api_version = "v3"
-                token = "vnfd"
-                vnfd_catalog = descriptor.get("vnfd:vnfd-catalog")
-                if not vnfd_catalog:
-                    vnfd_catalog = descriptor.get("vnfd-catalog")
-                vnfds = vnfd_catalog.get("vnfd:vnfd")
-                if not vnfds:
-                    vnfds = vnfd_catalog.get("vnfd")
-                vnfd = vnfds[0]
-                vdu_list = vnfd["vdu"]
-            elif "vnf" in descriptor:  # old API
-                api_version = None
-                token = "vnfs"
-                vnfd = descriptor['vnf']
-                vdu_list = vnfd["VNFC"]
-            else:
-                raise OpenmanoBadParamsException("Invalid VNF Descriptor must contain only one 'vnf' field or vnd-catalog")
-        except (KeyError, TypeError) as e:
-            raise OpenmanoBadParamsException("Invalid VNF Descriptor. Missing field {}".format(e))
-
-        if kwargs:
-            try:
-                if kwargs.get('name'):
-                    vnfd['name'] = kwargs['name']
-                if kwargs.get('description'):
-                    vnfd['description'] = kwargs['description']
-                if kwargs.get('image_path'):
-                    error_param = 'image_path'
-                    image_list = kwargs['image_path'].split(",")
-                    image_item = image_list.pop(0)
-                    # print "image-path", image_path_
-                    for vdu in vdu_list:
-                        if api_version == "v3":
-                            if vdu.get("image"):
-                                if image_item:
-                                    vdu['image'] = image_item
-                                    if "image-checksum" in vdu:
-                                        del vdu["image-checksum"]
-                                if image_list:
-                                    image_item = image_list.pop(0)
-                            for vol in vdu.get("volumes", ()):  # image name in volumes
-                                if image_item:
-                                    vol["image"] = image_item
-                                    if "image-checksum" in vol:
-                                        del vol["image-checksum"]
-                                if image_list:
-                                    image_item = image_list.pop(0)
-                        else:
-                            if image_item:
-                                vdu['VNFC image'] = image_item
-                                if "image name" in vdu:
-                                    del vdu["image name"]
-                                if "image checksum" in vdu:
-                                    del vdu["image checksum"]
-                            if image_list:
-                                image_item = image_list.pop(0)
-                            for vol in vdu.get('devices', ()):
-                                if vol['type'] != 'disk':
-                                    continue
-                                if image_item:
-                                    vol['image'] = image_item
-                                    if "image name" in vol:
-                                        del vol["image name"]
-                                    if "image checksum" in vol:
-                                        del vol["image checksum"]
-                                if image_list:
-                                    image_item = image_list.pop(0)
-                if kwargs.get('image_name'):  # image name precedes if both are supplied
-                    error_param = 'image_name'
-                    image_list = kwargs['image_name'].split(",")
-                    image_item = image_list.pop(0)
-                    for vdu in vdu_list:
-                        if api_version == "v3":
-                            if vdu.get("image"):
-                                if image_item:
-                                    vdu['image'] = image_item
-                                    if "image-checksum" in vdu:
-                                        del vdu["image-checksum"]
-                                if image_list:
-                                    image_item = image_list.pop(0)
-                            for vol in vdu.get("volumes", ()):  # image name in volumes
-                                if image_item:
-                                    vol["image"] = image_item
-                                    if "image-checksum" in vol:
-                                        del vol["image-checksum"]
-                                if image_list:
-                                    image_item = image_list.pop(0)
-                        else:
-                            if image_item:
-                                vdu['image name'] = image_item
-                                if "VNFC image" in vdu:
-                                    del vdu["VNFC image"]
-                            if image_list:
-                                image_item = image_list.pop(0)
-                            for vol in vdu.get('devices', ()):
-                                if vol['type'] != 'disk':
-                                    continue
-                                if image_item:
-                                    vol['image name'] = image_item
-                                    if "image" in vol:
-                                        del vol["image"]
-                                    if "image checksum" in vol:
-                                        del vol["image checksum"]
-                                if image_list:
-                                    image_item = image_list.pop(0)
-
-                if kwargs.get('image_checksum'):
-                    error_param = 'image_checksum'
-                    image_list = kwargs['image_checksum'].split(",")
-                    image_item = image_list.pop(0)
-                    for vdu in vdu_list:
-                        if api_version == "v3":
-                            if vdu.get("image"):
-                                if image_item:
-                                    vdu['image-checksum'] = image_item
-                                if image_list:
-                                    image_item = image_list.pop(0)
-                            for vol in vdu.get("volumes", ()):  # image name in volumes
-                                if image_item:
-                                    vol["mage-checksum"] = image_item
-                                if image_list:
-                                    image_item = image_list.pop(0)
-                        else:
-                            if image_item:
-                                vdu['image checksum'] = image_item
-                                if "VNFC image" in vdu:
-                                    del vdu["VNFC image"]
-                            if image_list:
-                                image_item = image_list.pop(0)
-                            for vol in vdu.get('devices', ()):
-                                if vol['type'] != 'disk':
-                                    continue
-                                if image_item:
-                                    vol['image checksum'] = image_item
-                                    if "image" in vol:
-                                        del vol["image"]
-                                if image_list:
-                                    image_item = image_list.pop(0)
-            except IndexError:
-                raise OpenmanoBadParamsException("{} contains more items than {} at descriptor".format(
-                    error_param, "vnfd-catalog:vnfd:vdu" if api_version else "vnf:VNFC"))
-            except (KeyError, TypeError) as e:
-                raise OpenmanoBadParamsException("Invalid VNF Descriptor. Missing field {}".format(e))
-        return self._create_item(token, descriptor, api_version=api_version)
-
-#     def edit_vnf(self, uuid=None, name=None, descriptor=None, descriptor_format=None, all_tenants=False, **kwargs):
-#         '''Edit the parameters of a vnf
-#         Params: must supply a descriptor or/and a parameters to change
-#             uuid or/and name. If only name is supplied, there must be only one or an exception is raised
-#             descriptor: with format {'vnf':{params to change info}}
-#             parameters to change can be supplyied by the descriptor or as parameters:
-#                 new_name: the vnf name
-#                 vim_url: the vnf URL
-#                 vim_url_admin: the vnf URL for administrative issues
-#                 vim_type: the vnf type, can be openstack or openvim.
-#                 public: boolean, available to other tenants
-#                 description: vnf description
-#         Return: Raises an exception on error, not found or found several
-#                 Obtain a dictionary with format {'vnf':{new_vnf_info}}
-#         '''
-# 
-#        if isinstance(descriptor, str):
-#            descriptor = self.parse(descriptor, descriptor_format)
-#        elif descriptor:
-#            pass
-#         elif kwargs:
-#             descriptor={"vnf": {}}
-#         else:
-#             raise OpenmanoBadParamsException("Missing descriptor")
-# 
-#         if 'vnf' not in descriptor or len(descriptor)>2:
-#             raise OpenmanoBadParamsException("Descriptor must contain only one 'vnf' field")
-#         for param in kwargs:
-#             if param=='new_name':
-#                 descriptor['vnf']['name'] = kwargs[param]
-#             else:
-#                 descriptor['vnf'][param] = kwargs[param]
-#         return self._edit_item("vnfs", descriptor, uuid, name, all_tenants=None)
-
-    #SCENARIOS
-    def list_scenarios(self, all_tenants=False, **kwargs):
-        '''Obtain a list of scenarios
-        Params: can be filtered by 'uuid','name','description','public', "tenant_id"
-        Return: Raises an exception on error
-                Obtain a dictionary with format {'scenarios':[{scenario1_info},{scenario2_info},...]}}
-        '''
-        return self._list_item("scenarios", all_tenants, kwargs)
-
-    def get_scenario(self, uuid=None, name=None, all_tenants=False):
-        '''Obtain the information of a scenario
-        Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
-        Return: Raises an exception on error, not found, found several
-                Obtain a dictionary with format {'scenario':{scenario_info}}
-        '''
-        return self._get_item("scenarios", uuid, name, all_tenants)
-
-    def delete_scenario(self, uuid=None, name=None, all_tenants=False):
-        '''Delete a scenario
-        Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
-        Return: Raises an exception on error, not found, found several, not free
-                Obtain a dictionary with format {'result': text indicating deleted}
-        '''
-        return self._del_item("scenarios", uuid, name, all_tenants)
-
-    def create_scenario(self, descriptor=None, descriptor_format=None, **kwargs):
-        """Creates a scenario
-        Params: must supply a descriptor
-            descriptor: with format {'scenario':{new_scenario_info}}
-                must be a dictionary or a json/yaml text.
-            Other parameters can be:
-                name: the scenario name. Overwrite descriptor name if any
-                description: scenario descriptor.. Overwrite descriptor description if any
-                public: boolean, available to other tenants
-                tenant_id. Propietary tenant
-        Return: Raises an exception on error
-                Obtain a dictionary with format {'scenario':{new_scenario_info}}
-        """
-        if isinstance(descriptor, str):
-            descriptor = self.parse(descriptor, descriptor_format)
-        elif descriptor:
-            pass
-        else:
-            raise OpenmanoBadParamsException("Missing descriptor")
-        
-        try:
-            if "nsd:nsd-catalog" in descriptor or "nsd-catalog" in descriptor:
-                api_version = "v3"
-                token = "nsd"
-                nsd_catalog = descriptor.get("nsd:nsd-catalog")
-                if not nsd_catalog:
-                    nsd_catalog = descriptor.get("nsd-catalog")
-                nsds = nsd_catalog.get("nsd:nsd")
-                if not nsds:
-                    nsds = nsd_catalog.get("nsd")
-                nsd = nsds[0]
-            elif "scenario" in descriptor:  # old API
-                api_version = None
-                token = "scenarios"
-                nsd = descriptor['scenario']
-            else:
-                raise OpenmanoBadParamsException("Invalid NS Descriptor must contain only one 'scenario' field or nsd-catalog")
-        except (KeyError, TypeError) as e:
-            raise OpenmanoBadParamsException("Invalid NS Descriptor. Missing field {}".format(e))
-
-        for param in kwargs:
-            nsd[param] = kwargs[param]
-        return self._create_item(token, descriptor, api_version=api_version)
-
-    def edit_scenario(self, uuid=None, name=None, descriptor=None, descriptor_format=None, all_tenants=False, **kwargs):
-        '''Edit the parameters of a scenario
-        Params: must supply a descriptor or/and a parameters to change
-            uuid or/and name. If only name is supplied, there must be only one or an exception is raised
-            descriptor: with format {'scenario':{params to change info}}
-                must be a dictionary or a json/yaml text.
-            parameters to change can be supplyied by the descriptor or as parameters:
-                new_name: the scenario name
-                public: boolean, available to other tenants
-                description: scenario description
-                tenant_id. Propietary tenant
-        Return: Raises an exception on error, not found or found several
-                Obtain a dictionary with format {'scenario':{new_scenario_info}}
-        '''
-        if isinstance(descriptor, str):
-            descriptor = self.parse(descriptor, descriptor_format)
-        elif descriptor:
-            pass
-        elif kwargs:
-            descriptor={"scenario": {}}
-        else:
-            raise OpenmanoBadParamsException("Missing descriptor")
-        if 'scenario' not in descriptor or len(descriptor)>2:
-            raise OpenmanoBadParamsException("Descriptor must contain only one 'scenario' field")
-        for param in kwargs:
-            if param=='new_name':
-                descriptor['scenario']['name'] = kwargs[param]
-            else:
-                descriptor['scenario'][param] = kwargs[param]
-        return self._edit_item("scenarios", descriptor, uuid, name, all_tenants=None)
-
-
-    #INSTANCE-SCENARIOS
-    def list_instances(self, all_tenants=False, **kwargs):
-        '''Obtain a list of instances
-        Params: can be filtered by 'uuid','name','description','scenario_id', "tenant_id"
-        Return: Raises an exception on error
-                Obtain a dictionary with format {'instances':[{instance1_info},{instance2_info},...]}}
-        '''
-        return self._list_item("instances", all_tenants, kwargs)
-
-    def get_instance(self, uuid=None, name=None, all_tenants=False):
-        '''Obtain the information of a instance
-        Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
-        Return: Raises an exception on error, not found, found several
-                Obtain a dictionary with format {'instance':{instance_info}}
-        '''
-        return self._get_item("instances", uuid, name, all_tenants)
-
-    def delete_instance(self, uuid=None, name=None, all_tenants=False):
-        '''Delete a instance
-        Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
-        Return: Raises an exception on error, not found, found several, not free
-                Obtain a dictionary with format {'result': text indicating deleted}
-        '''
-        return self._del_item("instances", uuid, name, all_tenants)
-
-    def create_instance(self, descriptor=None, descriptor_format=None, name=None, **kwargs):
-        '''Creates a instance
-        Params: must supply a descriptor or/and a name and scenario
-            descriptor: with format {'instance':{new_instance_info}}
-                must be a dictionary or a json/yaml text.
-            name: the instance name. Overwrite descriptor name if any
-            Other parameters can be:
-                description: instance descriptor.. Overwrite descriptor description if any
-                datacenter_name, datacenter_id: datacenter  where to be deployed
-                scenario_name, scenario_id: Scenario this instance is based on
-        Return: Raises an exception on error
-                Obtain a dictionary with format {'instance':{new_instance_info}}
-        '''
-        if isinstance(descriptor, str):
-            descriptor = self.parse(descriptor, descriptor_format)
-        elif descriptor:
-            pass
-        elif name and ("scenario_name" in kwargs or "scenario_id" in kwargs):
-            descriptor = {"instance": {"name": name}}
-        else:
-            raise OpenmanoBadParamsException("Missing descriptor")
-        
-        if 'instance' not in descriptor or len(descriptor)>2:
-            raise OpenmanoBadParamsException("Descriptor must contain only one 'instance' field, and an optional version")
-        if name:
-            descriptor['instance']["name"] = name
-        if "scenario_name" in kwargs or "scenario_id" in kwargs:
-            descriptor['instance']["scenario"] = self._get_item_uuid("scenarios", kwargs.get("scenario_id"), kwargs.get("scenario_name"))
-        if "datacenter_name" in kwargs or "datacenter_id" in kwargs:
-            descriptor['instance']["datacenter"] = self._get_item_uuid("datacenters", kwargs.get("datacenter_id"), kwargs.get("datacenter_name"))
-        if "description" in kwargs:
-            descriptor['instance']["description"] = kwargs.get("description")
-        #for param in kwargs:
-        #    descriptor['instance'][param] = kwargs[param]
-        if "datacenter" not in descriptor['instance']:
-            descriptor['instance']["datacenter"] = self._get_datacenter()
-        return self._create_item("instances", descriptor)
-
-    #VIM ACTIONS
-    def vim_action(self, action, item, uuid=None, all_tenants=False, **kwargs):
-        '''Perform an action over a vim
-        Params: 
-            action: can be 'list', 'get'/'show', 'delete' or 'create'
-            item: can be 'tenants' or 'networks'
-            uuid: uuid of the tenant/net to show or to delete. Ignore otherwise
-            other parameters:
-                datacenter_name, datacenter_id: datacenters to act on, if missing uses classes store datacenter 
-                descriptor, descriptor_format: descriptor needed on creation, can be a dict or a yaml/json str 
-                    must be a dictionary or a json/yaml text.
-                name: for created tenant/net Overwrite descriptor name if any
-                description: tenant descriptor. Overwrite descriptor description if any
-                
-        Return: Raises an exception on error
-                Obtain a dictionary with format {'tenant':{new_tenant_info}}
-        '''
-        if item not in ("tenants", "networks", "images"):
-            raise OpenmanoBadParamsException("Unknown value for item '{}', must be 'tenants', 'nets' or "
-                                             "images".format(str(item)))
-
-        image_actions = ['list','get','show','delete']
-        if item == "images" and action not in image_actions:
-            raise OpenmanoBadParamsException("Only available actions for item '{}' are {}\n"
-                                             "Requested action was '{}'".format(item, ', '.join(image_actions), action))
-        if all_tenants:
-            tenant_text = "/any"
-        else:
-            tenant_text = "/"+self._get_tenant()
-        
-        if "datacenter_id" in kwargs or "datacenter_name" in kwargs:
-            datacenter = self._get_item_uuid("datacenters", kwargs.get("datacenter_id"), kwargs.get("datacenter_name"), all_tenants=all_tenants)
-        else:
-            datacenter = self._get_datacenter()
-
-        if action=="list":
-            URLrequest = "{}{}/vim/{}/{}".format(self.endpoint_url, tenant_text, datacenter, item)
-            self.logger.debug("GET %s", URLrequest )
-            mano_response = requests.get(URLrequest, headers=self.headers_req)
-            self.logger.debug("openmano response: %s", mano_response.text )
-            content = self._parse_yaml(mano_response.text, response=True)            
-            if mano_response.status_code==200:
-                return content
-            else:
-                raise OpenmanoResponseException(str(content))        
-        elif action=="get" or action=="show":
-            URLrequest = "{}{}/vim/{}/{}/{}".format(self.endpoint_url, tenant_text, datacenter, item, uuid)
-            self.logger.debug("GET %s", URLrequest )
-            mano_response = requests.get(URLrequest, headers=self.headers_req)
-            self.logger.debug("openmano response: %s", mano_response.text )
-            content = self._parse_yaml(mano_response.text, response=True)            
-            if mano_response.status_code==200:
-                return content
-            else:
-                raise OpenmanoResponseException(str(content))        
-        elif action=="delete":
-            URLrequest = "{}{}/vim/{}/{}/{}".format(self.endpoint_url, tenant_text, datacenter, item, uuid)
-            self.logger.debug("DELETE %s", URLrequest )
-            mano_response = requests.delete(URLrequest, headers=self.headers_req)
-            self.logger.debug("openmano response: %s", mano_response.text )
-            content = self._parse_yaml(mano_response.text, response=True)            
-            if mano_response.status_code==200:
-                return content
-            else:
-                raise OpenmanoResponseException(str(content))        
-        elif action=="create":
-            if "descriptor" in kwargs:
-                if isinstance(kwargs["descriptor"], str):
-                    descriptor = self._parse(kwargs["descriptor"], kwargs.get("descriptor_format") )
-                else:
-                    descriptor = kwargs["descriptor"]
-            elif "name" in kwargs:
-                descriptor={item[:-1]: {"name": kwargs["name"]}}
-            else:
-                raise OpenmanoResponseException("Missing descriptor")
-        
-            if item[:-1] not in descriptor or len(descriptor)!=1:
-                raise OpenmanoBadParamsException("Descriptor must contain only one 'tenant' field")
-            if "name" in kwargs:
-                descriptor[ item[:-1] ]['name'] = kwargs["name"]
-            if "description" in kwargs:
-                descriptor[ item[:-1] ]['description'] = kwargs["description"]
-            payload_req = yaml.safe_dump(descriptor)
-            #print payload_req
-            URLrequest = "{}{}/vim/{}/{}".format(self.endpoint_url, tenant_text, datacenter, item)
-            self.logger.debug("openmano POST %s %s", URLrequest, payload_req)
-            mano_response = requests.post(URLrequest, headers = self.headers_req, data=payload_req)
-            self.logger.debug("openmano response: %s", mano_response.text )
-            content = self._parse_yaml(mano_response.text, response=True)
-            if mano_response.status_code==200:
-                return content
-            else:
-                raise OpenmanoResponseException(str(content))
-        else:
-            raise OpenmanoBadParamsException("Unknown value for action '{}".format(str(action))) 
-
diff --git a/osm_ro/openmanod.cfg b/osm_ro/openmanod.cfg
deleted file mode 100644 (file)
index 3565bbf..0000000
+++ /dev/null
@@ -1,88 +0,0 @@
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-#HTTP Server parameters (MANO API). IP address and port where openmanod listens
-# IPtables/firewalld must allow this port
-# for CentOS/Redhad firewalld is configured at '/etc/firewalld/services/openmanod.xml'
-# edit this file and reload firewalld with 'firewall-cmd --reload' if port is changed
-http_host:       0.0.0.0      # IP address, (by default, 0.0.0.0 means that it will listen in all interfaces)
-http_port:       9090         # General port (by default, 9090)
-#http_admin_port: 9095        # Admin port where openmano is listening (when missing, no administration server is launched)
-                              # Not used in current version!
-
-#Parameters for a VIM console access. Can be directly the VIM URL or a proxy to offer the openmano IP address
-http_console_proxy: False    #by default True. If False proxy is not implemented and VIM URL is offered. It is
-                              #assumed then, that client can access directly to the VIMs
-#http_console_host: <ip>       #by default the same as 'http_host'. However is openmano server is behind a NAT/proxy
-                              #you should specify the public IP used to access the server. Also when 'http_host' is 
-                              #0.0.0.0 you should specify the concrete IP address (or name) the server is accessed 
-# Ports to be used. Comma separated list. Can contain a {"from":<port>, "to":<port>} entry
-#e.g. from 9000 to 9005: [{"from":9000, "to":9005}], or also [9000,9001,9002,9003,9004,9005]
-#e.g. from 9000 to 9100 apart from 9050,9053: [{"from":9000, "to":9049},9051,9052,{"from":9054, "to":9099}]
-http_console_ports: [{"from":9096, "to":9110}]
-
-#Database parameters
-db_host:   localhost          # by default localhost
-db_user:   mano               # DB user
-db_passwd: manopw             # DB password
-db_name:   mano_db            # Name of the MANO DB
-# Database ovim parameters
-db_ovim_host:   localhost          # by default localhost
-db_ovim_user:   mano               # DB user
-db_ovim_passwd: manopw             # DB password
-db_ovim_name:   mano_vim_db        # Name of the OVIM MANO DB
-
-
-#other MANO parameters
-#  Folder where the VNF descriptors will be stored
-#  The folder will be created in the execution folder if it does not exist
-#vnf_repository: "./vnfrepo"  # Use an absolute path to avoid misunderstandings
-
-#   Indicates if at VNF onboarding, flavors and images are loaded at all related VIMs,
-#   in order to speed up the later instantiation.
-auto_push_VNF_to_VIMs: False  # by default True
-
-#general logging parameters 
-   #choose among: DEBUG, INFO, WARNING, ERROR, CRITICAL
-log_level:         INFO  #general log levels for internal logging
-#standard output is used unless 'log_file' is specify 
-#log_file:          /var/log/openmano/openmano.log
-
-#individual logging settings
-log_level_db:      ERROR  #database log levels
-#log_file_db:       /opt/openmano/logs/openmano_db.log
-#log_level_vim:     DEBUG  #VIM connection log levels
-#log_file_vim:      /opt/openmano/logs/openmano_vimconn.log
-#log_level_wim:     DEBUG  #WIM connection log levels
-#log_file_wim:      /opt/openmano/logs/openmano_wimconn.log
-#log_level_nfvo:    DEBUG  #Main engine log levels
-#log_file_nfvo:     /opt/openmano/logs/openmano_nfvo.log
-#log_level_http:    DEBUG  #Main engine log levels
-#log_file_http:     /opt/openmano/logs/openmano_http.log
-#log_level_console: DEBUG  #proxy console log levels
-#log_file_console:  /opt/openmano/logs/openmano_console.log
-#log_level_ovim:    DEBUG  #ovim library log levels
-#log_file_ovim:     /opt/openmano/logs/openmano_ovim.log
-
-#Uncomment to send logs via IP to an external host
-#log_socket_host:   localhost
-log_socket_port:   9022
-log_socket_level:  DEBUG  #general log levels for socket logging      
diff --git a/osm_ro/osm-ro.service b/osm_ro/osm-ro.service
deleted file mode 100644 (file)
index 2246885..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-[Unit]
-Description=openmano server (OSM RO)
-After=mysql.service
-
-[Service]
-ExecStart=/usr/bin/openmanod -c /etc/osm/openmanod.cfg --log-file=/var/log/osm/openmano.log
-Restart=always
-
-[Install]
-WantedBy=multi-user.target
-
diff --git a/osm_ro/tests/__init__.py b/osm_ro/tests/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/osm_ro/tests/db_helpers.py b/osm_ro/tests/db_helpers.py
deleted file mode 100644 (file)
index bedf9a5..0000000
+++ /dev/null
@@ -1,187 +0,0 @@
-# -*- coding: utf-8 -*-
-##
-# Copyright 2018 University of Bristol - High Performance Networks Research
-# Group
-# All Rights Reserved.
-#
-# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
-# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: <highperformance-networks@bristol.ac.uk>
-#
-# Neither the name of the University of Bristol nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# This work has been performed in the context of DCMS UK 5G Testbeds
-# & Trials Programme and in the framework of the Metro-Haul project -
-# funded by the European Commission under Grant number 761727 through the
-# Horizon 2020 and 5G-PPP programmes.
-##
-import hashlib
-import shlex
-import unittest
-from contextlib import contextmanager
-from functools import wraps
-from hashlib import md5
-from os import environ, pathsep
-from subprocess import STDOUT, check_output
-from uuid import UUID
-
-from MySQLdb import connect
-
-from ..nfvo_db import nfvo_db
-
-HOST = environ.get('TEST_DB_HOST', 'localhost')
-USER = environ.get('TEST_DB_USER', 'mano')
-PASSWORD = environ.get('TEST_DB_PASSWORD', 'manopw')
-DATABASE = environ.get('TEST_DB_DATABASE', 'mano_db')
-
-
-def uuid(seed):
-    """Generates strings with a UUID format in a repeatable way"""
-    return str(UUID(md5(str(seed)).hexdigest()))
-
-
-def sha1(text):
-    """Generates SHA1 hash code from a text string"""
-    return hashlib.sha1(text).hexdigest()
-
-
-def run(*args, **kwargs):
-    """Run a command inside a subprocess, raising an exception when it fails
-
-    Arguments:
-        *args: you can pass any number of arquments as separated words in the
-            shell, or just a single string with the entire command
-        **kwargs: proxied to subprocess.check_output (by default
-            ``stderr=STDOUT`` and ``universal_newlines=True``
-    """
-    if len(args) == 1 and isinstance(args[0], str):
-        args = shlex.split(args[0])
-
-    opts = dict(stderr=STDOUT, universal_newlines=True)
-    opts.update(kwargs)
-    return check_output(args, **opts)
-
-
-# In order to not mess around, enforce user to explicit set the
-# test database in a env variable
-@unittest.skipUnless(
-    environ.get('TEST_DB_HOST'),
-    'Test database not available. Please set TEST_DB_HOST env var')
-class TestCaseWithDatabase(unittest.TestCase):
-    """Connect to the database and provide methods to facilitate isolating the
-    database stored inside it between tests.
-
-    In order to avoid connecting, reconnecting, creating tables and destroying
-    tables all the time, this class manage the database using class-level
-    fixtures. This reduce the cost of performing these actions but not
-    guarantees isolation in the DB state between the tests.
-    To enforce isolation, please call the ``setup_tables`` and
-    ``empty_database`` directly, or write one single test per class.
-    """
-
-    host = HOST
-    user = USER
-    password = PASSWORD
-    database = DATABASE
-
-    @classmethod
-    def setup_tables(cls):
-        """Make sure the database is set up and in the right version, with all the
-        required tables.
-        """
-        dbutils = environ.get('DBUTILS')
-
-        if dbutils:
-            environ["PATH"] += pathsep + dbutils
-
-        return run('init_mano_db.sh',
-                   '-u', cls.user,
-                   '-p', cls.password,
-                   '-h', cls.host,
-                   '-d', cls.database)
-
-    @classmethod
-    def empty_database(cls):
-        """Clear the database, so one test does not interfere with the other"""
-        # Create a custom connection not attached to the database, so we can
-        # destroy and recreate the database itself
-        connection = connect(cls.host, cls.user, cls.password)
-        cursor = connection.cursor()
-        cursor.execute(
-            "DROP DATABASE {};".format(
-                connection.escape_string(cls.database)))
-        cursor.execute(
-            "CREATE DATABASE {};".format(
-                connection.escape_string(cls.database)))
-        cursor.close()
-        connection.close()
-
-
-class TestCaseWithDatabasePerTest(TestCaseWithDatabase):
-    """Ensure a connection to the database before and
-    drop tables after each test runs
-    """
-
-    def setUp(self):
-        self.setup_tables()
-        self.addCleanup(self.empty_database)
-
-        self.maxDiff = None
-
-        self.db = nfvo_db(self.host, self.user, self.password, self.database)
-        self.db.connect()
-
-    def populate(self, seeds=None, **kwargs):
-        """Seed the database with initial values"""
-        if not seeds:
-            seeds = []
-        if not isinstance(seeds, (list, tuple)):
-            seeds = [seeds]
-        if kwargs:
-            seeds.append(kwargs)
-        self.db.new_rows(seeds)
-
-    def count(self, table):
-        """Count number of rows in a table"""
-        return self.db.get_rows(
-            SELECT='COUNT(*) as count', FROM=table)[0]['count']
-
-    @contextmanager
-    def disable_foreign_keys(self):
-        """Do the test without checking foreign keys"""
-        try:
-            cursor = self.db.con.cursor()
-            cursor.execute('SET FOREIGN_KEY_CHECKS=0;')
-            yield
-        finally:
-            cursor.execute('SET FOREIGN_KEY_CHECKS=1;')
-
-
-def disable_foreign_keys(test):
-    """Do the test without checking foreign keys.
-    To be used together in subclasses of TestCaseWithDatabasePerTest
-    """
-    @wraps(test)
-    def _no_check(self, *args, **kwargs):
-        with self.disable_foreign_keys():
-            result = test(self, *args, **kwargs)
-
-        return result
-
-    return _no_check
diff --git a/osm_ro/tests/helpers.py b/osm_ro/tests/helpers.py
deleted file mode 100644 (file)
index 787fbce..0000000
+++ /dev/null
@@ -1,121 +0,0 @@
-# -*- coding: utf-8 -*-
-##
-# Copyright 2018 University of Bristol - High Performance Networks Research
-# Group
-# All Rights Reserved.
-#
-# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
-# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: <highperformance-networks@bristol.ac.uk>
-#
-# Neither the name of the University of Bristol nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# This work has been performed in the context of DCMS UK 5G Testbeds
-# & Trials Programme and in the framework of the Metro-Haul project -
-# funded by the European Commission under Grant number 761727 through the
-# Horizon 2020 and 5G-PPP programmes.
-##
-
-import logging
-import unittest
-from collections import defaultdict
-
-from six import StringIO
-
-from mock import MagicMock, patch
-
-logger = logging.getLogger()
-
-
-class TestCaseWithLogging(unittest.TestCase):
-    """Attach a special handler to the root logger, capturing the logs in a
-    internal buffer (caplog property).
-
-    To retrieve the logs, do::
-
-        self.caplog.getvalue()
-    """
-    def setUp(self):
-        super(TestCaseWithLogging, self).setUp()
-        self.logger = logging.getLogger()
-        self.caplog = StringIO()
-        self.log_handler = logging.StreamHandler(self.caplog)
-        self.logger.addHandler(self.log_handler)
-        self.logger.setLevel(logging.NOTSET)
-
-    def tearDown(self):
-        super(TestCaseWithLogging, self).tearDown()
-        self.log_handler.close()
-        self.logger.removeHandler(self.log_handler)
-
-
-def mock_imports(modules, preserve=()):
-    """Given a list of modules, mock everything, unless listed in the preserve
-    argument.
-    """
-    # Ensure iterable
-    if isinstance(modules, str):
-        modules = (modules,)
-    if isinstance(preserve, str):
-        preserve = (preserve,)
-
-    # First expand the list, since children modules needs their parent also
-    # mocked most of the time.
-    # Example: ['Crypto.PublicKey'] => ['Crypto', 'Crypto.PublicKey']
-    all_modules = []
-    for name in modules:
-        parts = name.split('.')
-        compound_name = []
-        for part in parts:
-            compound_name.append(part)
-            all_modules.append('.'.join(compound_name))
-
-    all_modules = set(m for m in all_modules if m not in preserve)
-    for module in all_modules:
-        logger.info('Mocking module `%s`', module)
-
-    mocks = {module: MagicMock() for module in all_modules}
-
-    return patch.dict('sys.modules', **mocks)
-
-
-def mock_dict(**kwargs):
-    """Create a dict that always respond something.
-
-    Arguments:
-        **kwargs: certain items that should be set in the created object
-    """
-    response = defaultdict(MagicMock)
-    for k, v in kwargs.items():
-        response[k] = v
-
-    return response
-
-
-def mock_object(**kwargs):
-    """Create an object that always respond something.
-
-    Arguments:
-        **kwargs: certain attributes that should be set in the created object
-    """
-    response = MagicMock()
-    for k, v in kwargs.items():
-        setattr(response, k, v)
-
-    return response
diff --git a/osm_ro/tests/test_db.py b/osm_ro/tests/test_db.py
deleted file mode 100644 (file)
index e152347..0000000
+++ /dev/null
@@ -1,123 +0,0 @@
-# -*- coding: utf-8 -*-
-# pylint: disable=E1101
-import unittest
-
-from MySQLdb import connect, cursors, DatabaseError, IntegrityError
-import mock
-from mock import Mock
-
-from ..db_base import retry, with_transaction
-from ..nfvo_db import nfvo_db
-from .db_helpers import TestCaseWithDatabase
-
-
-class TestDbDecorators(TestCaseWithDatabase):
-    @classmethod
-    def setUpClass(cls):
-        connection = connect(cls.host, cls.user, cls.password)
-        cursor = connection.cursor()
-        cursor.execute(
-            "CREATE DATABASE IF NOT EXISTS {};".format(
-                connection.escape_string(cls.database)))
-        cursor.execute("use {};".format(cls.database))
-        cursor.execute("""\
-            CREATE TABLE IF NOT EXISTS `test_table` (\
-                `id` int(11) NOT NULL,
-                PRIMARY KEY (`id`)\
-            );\
-        """)
-        cursor.close()
-        connection.close()
-
-    @classmethod
-    def tearDownClass(cls):
-        cls.empty_database()
-
-    def setUp(self):
-        self.maxDiff = None
-        self.db = nfvo_db(self.host, self.user, self.password, self.database)
-        self.db.connect()
-        self.addCleanup(lambda: self.db.disconnect())
-
-    def db_run(self, query, cursor=None):
-        cursor = cursor or self.db.con.cursor()
-        cursor.execute(query)
-        return cursor.fetchone()
-
-    def test_retry_inject_attempt(self):
-        @retry
-        def _fn(db, attempt=None):
-            self.assertIsNotNone(attempt)
-            self.assertEqual(attempt.number, 1)
-
-        _fn(self.db)
-
-    def test_retry_accept_max_attempts(self):
-        success = []
-        failures = []
-
-        @retry(max_attempts=5)
-        def _fn(db, attempt=None):
-            if attempt.count < 4:
-                failures.append(attempt.count)
-                raise DatabaseError("Emulate DB error", "msg")
-            success.append(attempt.count)
-
-        _fn(self.db)
-        self.assertEqual(failures, [0, 1, 2, 3])
-        self.assertEqual(success, [4])
-
-    def test_retry_reconnect_auctomatically(self):
-        success = []
-        failures = []
-
-        @retry(max_attempts=3)
-        def _fn(db, attempt=None):
-            if attempt.count < 2:
-                failures.append(attempt.count)
-                db.con.close()  # Simulate connection failure
-            result = self.db_run('select 1+1, 2+2;')
-            success.append(attempt.count)
-            return result
-
-        result = _fn(self.db)
-        self.assertEqual(failures, [0, 1])
-        self.assertEqual(success, [2])
-        self.assertEqual(result, (2, 4))
-
-    def test_retry_reraise_non_db_errors(self):
-        failures = []
-
-        @retry
-        def _fn(db, attempt=None):
-            failures.append(attempt.count)
-            raise SystemError("Non Correlated Error")
-
-        with self.assertRaises(SystemError):
-            _fn(self.db)
-
-        self.assertEqual(failures, [0])
-
-    def test_transaction_rollback(self):
-        with self.assertRaises(IntegrityError), \
-                 self.db.transaction() as cursor:
-            # The first row is created normally
-            self.db_run('insert into test_table (id) values (1)', cursor)
-            # The second row fails due to repeated id
-            self.db_run('insert into test_table (id) values (1)', cursor)
-            # The entire transaction will rollback then, and therefore the
-            # first operation will be undone
-
-        count = self.db_run('select count(*) FROM test_table')
-        self.assertEqual(count, (0,))
-
-    def test_transaction_cursor(self):
-        with self.db.transaction(cursors.DictCursor) as cursor:
-            count = self.db_run('select count(*) as counter FROM test_table',
-                                cursor)
-
-        self.assertEqual(count, {'counter': 0})
-
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/osm_ro/tests/test_utils.py b/osm_ro/tests/test_utils.py
deleted file mode 100644 (file)
index 9fd71cf..0000000
+++ /dev/null
@@ -1,77 +0,0 @@
-# -*- coding: utf-8 -*-
-# pylint: disable=E1101
-
-import unittest
-
-from ..utils import (
-    get_arg,
-    inject_args,
-    remove_extra_items,
-)
-
-
-class TestUtils(unittest.TestCase):
-    def test_inject_args_curries_arguments(self):
-        fn = inject_args(lambda a=None, b=None: a + b, a=3, b=5)
-        self.assertEqual(fn(), 8)
-
-    def test_inject_args_doesnt_add_arg_if_not_needed(self):
-        fn = inject_args(lambda: 7, a=1, b=2)
-        self.assertEqual(fn(), 7)
-        fn = inject_args(lambda a=None: a, b=2)
-        self.assertEqual(fn(1), 1)
-
-    def test_inject_args_knows_how_to_handle_arg_order(self):
-        fn = inject_args(lambda a=None, b=None: b - a, a=3)
-        self.assertEqual(fn(b=4), 1)
-        fn = inject_args(lambda b=None, a=None: b - a, a=3)
-        self.assertEqual(fn(b=4), 1)
-
-    def test_inject_args_works_as_decorator(self):
-        fn = inject_args(x=1)(lambda x=None: x)
-        self.assertEqual(fn(), 1)
-
-    def test_get_arg__positional(self):
-        def _fn(x, y, z):
-            return x + y + z
-
-        x = get_arg("x", _fn, (1, 3, 4), {})
-        self.assertEqual(x, 1)
-        y = get_arg("y", _fn, (1, 3, 4), {})
-        self.assertEqual(y, 3)
-        z = get_arg("z", _fn, (1, 3, 4), {})
-        self.assertEqual(z, 4)
-
-    def test_get_arg__keyword(self):
-        def _fn(x, y, z=5):
-            return x + y + z
-
-        z = get_arg("z", _fn, (1, 2), {"z": 3})
-        self.assertEqual(z, 3)
-
-
-
-    def test_remove_extra_items__keep_aditional_properties(self):
-        schema = {
-            "type": "object",
-            "properties": {
-                "a": {
-                    "type": "object",
-                    "properties": {
-                        "type": "object",
-                        "properties": {"b": "string"},
-                    },
-                    "additionalProperties": True,
-                }
-            },
-        }
-
-        example = {"a": {"b": 1, "c": 2}, "d": 3}
-        deleted = remove_extra_items(example, schema)
-        self.assertIn("d", deleted)
-        self.assertIs(example.get("d"), None)
-        self.assertEqual(example["a"]["c"], 2)
-
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/osm_ro/tests/test_vimconn_openstack.py b/osm_ro/tests/test_vimconn_openstack.py
deleted file mode 100644 (file)
index 5eb23f0..0000000
+++ /dev/null
@@ -1,854 +0,0 @@
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2017 Intel Corporation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-"""
-This module contains unit tests for the OpenStack VIM connector
-Run this directly with python2 or python3.
-"""
-
-import copy
-import unittest
-
-import mock
-from neutronclient.v2_0.client import Client
-
-from osm_ro import vimconn
-from osm_ro.vimconn_openstack import vimconnector
-
-
-__author__ = "Igor D.C."
-__date__ = "$23-aug-2017 23:59:59$"
-
-
-class TestSfcOperations(unittest.TestCase):
-    def setUp(self):
-        # instantiate dummy VIM connector so we can test it
-        self.vimconn = vimconnector(
-            '123', 'openstackvim', '456', '789', 'http://dummy.url', None,
-            'user', 'pass')
-
-    def _test_new_sfi(self, create_sfc_port_pair, sfc_encap,
-                      ingress_ports=['5311c75d-d718-4369-bbda-cdcc6da60fcc'],
-                      egress_ports=['230cdf1b-de37-4891-bc07-f9010cf1f967']):
-        # input to VIM connector
-        name = 'osm_sfi'
-        # + ingress_ports
-        # + egress_ports
-        # TODO(igordc): must be changed to NSH in Queens (MPLS is a workaround)
-        correlation = 'nsh'
-        if sfc_encap is not None:
-            if not sfc_encap:
-                correlation = None
-
-        # what OpenStack is assumed to respond (patch OpenStack's return value)
-        dict_from_neutron = {'port_pair': {
-            'id': '3d7ddc13-923c-4332-971e-708ed82902ce',
-            'name': name,
-            'description': '',
-            'tenant_id': '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c',
-            'project_id': '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c',
-            'ingress': ingress_ports[0] if len(ingress_ports) else None,
-            'egress': egress_ports[0] if len(egress_ports) else None,
-            'service_function_parameters': {'correlation': correlation}
-        }}
-        create_sfc_port_pair.return_value = dict_from_neutron
-
-        # what the VIM connector is expected to
-        # send to OpenStack based on the input
-        dict_to_neutron = {'port_pair': {
-            'name': name,
-            'ingress': '5311c75d-d718-4369-bbda-cdcc6da60fcc',
-            'egress': '230cdf1b-de37-4891-bc07-f9010cf1f967',
-            'service_function_parameters': {'correlation': correlation}
-        }}
-
-        # call the VIM connector
-        if sfc_encap is None:
-            result = self.vimconn.new_sfi(name, ingress_ports, egress_ports)
-        else:
-            result = self.vimconn.new_sfi(name, ingress_ports, egress_ports,
-                                          sfc_encap)
-
-        # assert that the VIM connector made the expected call to OpenStack
-        create_sfc_port_pair.assert_called_with(dict_to_neutron)
-        # assert that the VIM connector had the expected result / return value
-        self.assertEqual(result, dict_from_neutron['port_pair']['id'])
-
-    def _test_new_sf(self, create_sfc_port_pair_group):
-        # input to VIM connector
-        name = 'osm_sf'
-        instances = ['bbd01220-cf72-41f2-9e70-0669c2e5c4cd',
-                     '12ba215e-3987-4892-bd3a-d0fd91eecf98',
-                     'e25a7c79-14c8-469a-9ae1-f601c9371ffd']
-
-        # what OpenStack is assumed to respond (patch OpenStack's return value)
-        dict_from_neutron = {'port_pair_group': {
-            'id': '3d7ddc13-923c-4332-971e-708ed82902ce',
-            'name': name,
-            'description': '',
-            'tenant_id': '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c',
-            'project_id': '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c',
-            'port_pairs': instances,
-            'group_id': 1,
-            'port_pair_group_parameters': {
-                "lb_fields": [],
-                "ppg_n_tuple_mapping": {
-                    "ingress_n_tuple": {},
-                    "egress_n_tuple": {}
-                }}
-        }}
-        create_sfc_port_pair_group.return_value = dict_from_neutron
-
-        # what the VIM connector is expected to
-        # send to OpenStack based on the input
-        dict_to_neutron = {'port_pair_group': {
-            'name': name,
-            'port_pairs': ['bbd01220-cf72-41f2-9e70-0669c2e5c4cd',
-                           '12ba215e-3987-4892-bd3a-d0fd91eecf98',
-                           'e25a7c79-14c8-469a-9ae1-f601c9371ffd']
-        }}
-
-        # call the VIM connector
-        result = self.vimconn.new_sf(name, instances)
-
-        # assert that the VIM connector made the expected call to OpenStack
-        create_sfc_port_pair_group.assert_called_with(dict_to_neutron)
-        # assert that the VIM connector had the expected result / return value
-        self.assertEqual(result, dict_from_neutron['port_pair_group']['id'])
-
-    def _test_new_sfp(self, create_sfc_port_chain, sfc_encap, spi):
-        # input to VIM connector
-        name = 'osm_sfp'
-        classifications = ['2bd2a2e5-c5fd-4eac-a297-d5e255c35c19',
-                           '00f23389-bdfa-43c2-8b16-5815f2582fa8']
-        sfs = ['2314daec-c262-414a-86e3-69bb6fa5bc16',
-               'd8bfdb5d-195e-4f34-81aa-6135705317df']
-
-        # TODO(igordc): must be changed to NSH in Queens (MPLS is a workaround)
-        correlation = 'nsh'
-        chain_id = 33
-        if spi:
-            chain_id = spi
-
-        # what OpenStack is assumed to respond (patch OpenStack's return value)
-        dict_from_neutron = {'port_chain': {
-            'id': '5bc05721-079b-4b6e-a235-47cac331cbb6',
-            'name': name,
-            'description': '',
-            'tenant_id': '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c',
-            'project_id': '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c',
-            'chain_id': chain_id,
-            'flow_classifiers': classifications,
-            'port_pair_groups': sfs,
-            'chain_parameters': {'correlation': correlation}
-        }}
-        create_sfc_port_chain.return_value = dict_from_neutron
-
-        # what the VIM connector is expected to
-        # send to OpenStack based on the input
-        dict_to_neutron = {'port_chain': {
-            'name': name,
-            'flow_classifiers': ['2bd2a2e5-c5fd-4eac-a297-d5e255c35c19',
-                                 '00f23389-bdfa-43c2-8b16-5815f2582fa8'],
-            'port_pair_groups': ['2314daec-c262-414a-86e3-69bb6fa5bc16',
-                                 'd8bfdb5d-195e-4f34-81aa-6135705317df'],
-            'chain_parameters': {'correlation': correlation}
-        }}
-        if spi:
-            dict_to_neutron['port_chain']['chain_id'] = spi
-
-        # call the VIM connector
-        if sfc_encap is None:
-            if spi is None:
-                result = self.vimconn.new_sfp(name, classifications, sfs)
-            else:
-                result = self.vimconn.new_sfp(name, classifications, sfs,
-                                              spi=spi)
-        else:
-            if spi is None:
-                result = self.vimconn.new_sfp(name, classifications, sfs,
-                                              sfc_encap)
-            else:
-                result = self.vimconn.new_sfp(name, classifications, sfs,
-                                              sfc_encap, spi)
-
-        # assert that the VIM connector made the expected call to OpenStack
-        create_sfc_port_chain.assert_called_with(dict_to_neutron)
-        # assert that the VIM connector had the expected result / return value
-        self.assertEqual(result, dict_from_neutron['port_chain']['id'])
-
-    def _test_new_classification(self, create_sfc_flow_classifier, ctype):
-        # input to VIM connector
-        name = 'osm_classification'
-        definition = {'ethertype': 'IPv4',
-                      'logical_source_port':
-                          'aaab0ab0-1452-4636-bb3b-11dca833fa2b',
-                      'protocol': 'tcp',
-                      'source_ip_prefix': '192.168.2.0/24',
-                      'source_port_range_max': 99,
-                      'source_port_range_min': 50}
-
-        # what OpenStack is assumed to respond (patch OpenStack's return value)
-        dict_from_neutron = {'flow_classifier': copy.copy(definition)}
-        dict_from_neutron['flow_classifier'][
-            'id'] = '7735ec2c-fddf-4130-9712-32ed2ab6a372'
-        dict_from_neutron['flow_classifier']['name'] = name
-        dict_from_neutron['flow_classifier']['description'] = ''
-        dict_from_neutron['flow_classifier'][
-            'tenant_id'] = '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c'
-        dict_from_neutron['flow_classifier'][
-            'project_id'] = '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c'
-        create_sfc_flow_classifier.return_value = dict_from_neutron
-
-        # what the VIM connector is expected to
-        # send to OpenStack based on the input
-        dict_to_neutron = {'flow_classifier': copy.copy(definition)}
-        dict_to_neutron['flow_classifier']['name'] = 'osm_classification'
-
-        # call the VIM connector
-        result = self.vimconn.new_classification(name, ctype, definition)
-
-        # assert that the VIM connector made the expected call to OpenStack
-        create_sfc_flow_classifier.assert_called_with(dict_to_neutron)
-        # assert that the VIM connector had the expected result / return value
-        self.assertEqual(result, dict_from_neutron['flow_classifier']['id'])
-
-    @mock.patch.object(Client, 'create_sfc_flow_classifier')
-    def test_new_classification(self, create_sfc_flow_classifier):
-        self._test_new_classification(create_sfc_flow_classifier,
-                                      'legacy_flow_classifier')
-
-    @mock.patch.object(Client, 'create_sfc_flow_classifier')
-    def test_new_classification_unsupported_type(self, create_sfc_flow_classifier):
-        self.assertRaises(vimconn.vimconnNotSupportedException,
-                          self._test_new_classification,
-                          create_sfc_flow_classifier, 'h265')
-
-    @mock.patch.object(Client, 'create_sfc_port_pair')
-    def test_new_sfi_with_sfc_encap(self, create_sfc_port_pair):
-        self._test_new_sfi(create_sfc_port_pair, True)
-
-    @mock.patch.object(Client, 'create_sfc_port_pair')
-    def test_new_sfi_without_sfc_encap(self, create_sfc_port_pair):
-        self._test_new_sfi(create_sfc_port_pair, False)
-
-    @mock.patch.object(Client, 'create_sfc_port_pair')
-    def test_new_sfi_default_sfc_encap(self, create_sfc_port_pair):
-        self._test_new_sfi(create_sfc_port_pair, None)
-
-    @mock.patch.object(Client, 'create_sfc_port_pair')
-    def test_new_sfi_bad_ingress_ports(self, create_sfc_port_pair):
-        ingress_ports = ['5311c75d-d718-4369-bbda-cdcc6da60fcc',
-                         'a0273f64-82c9-11e7-b08f-6328e53f0fa7']
-        self.assertRaises(vimconn.vimconnNotSupportedException,
-                          self._test_new_sfi,
-                          create_sfc_port_pair, True, ingress_ports=ingress_ports)
-        ingress_ports = []
-        self.assertRaises(vimconn.vimconnNotSupportedException,
-                          self._test_new_sfi,
-                          create_sfc_port_pair, True, ingress_ports=ingress_ports)
-
-    @mock.patch.object(Client, 'create_sfc_port_pair')
-    def test_new_sfi_bad_egress_ports(self, create_sfc_port_pair):
-        egress_ports = ['230cdf1b-de37-4891-bc07-f9010cf1f967',
-                        'b41228fe-82c9-11e7-9b44-17504174320b']
-        self.assertRaises(vimconn.vimconnNotSupportedException,
-                          self._test_new_sfi,
-                          create_sfc_port_pair, True, egress_ports=egress_ports)
-        egress_ports = []
-        self.assertRaises(vimconn.vimconnNotSupportedException,
-                          self._test_new_sfi,
-                          create_sfc_port_pair, True, egress_ports=egress_ports)
-
-    @mock.patch.object(vimconnector, 'get_sfi')
-    @mock.patch.object(Client, 'create_sfc_port_pair_group')
-    def test_new_sf(self, create_sfc_port_pair_group, get_sfi):
-        get_sfi.return_value = {'sfc_encap': True}
-        self._test_new_sf(create_sfc_port_pair_group)
-
-    @mock.patch.object(vimconnector, 'get_sfi')
-    @mock.patch.object(Client, 'create_sfc_port_pair_group')
-    def test_new_sf_inconsistent_sfc_encap(self, create_sfc_port_pair_group,
-                                           get_sfi):
-        get_sfi.return_value = {'sfc_encap': 'nsh'}
-        self.assertRaises(vimconn.vimconnNotSupportedException,
-                          self._test_new_sf, create_sfc_port_pair_group)
-
-    @mock.patch.object(Client, 'create_sfc_port_chain')
-    def test_new_sfp_with_sfc_encap(self, create_sfc_port_chain):
-        self._test_new_sfp(create_sfc_port_chain, True, None)
-
-    @mock.patch.object(Client, 'create_sfc_port_chain')
-    def test_new_sfp_without_sfc_encap(self, create_sfc_port_chain):
-        self._test_new_sfp(create_sfc_port_chain, False, None)
-        self._test_new_sfp(create_sfc_port_chain, False, 25)
-
-    @mock.patch.object(Client, 'create_sfc_port_chain')
-    def test_new_sfp_default_sfc_encap(self, create_sfc_port_chain):
-        self._test_new_sfp(create_sfc_port_chain, None, None)
-
-    @mock.patch.object(Client, 'create_sfc_port_chain')
-    def test_new_sfp_with_sfc_encap_spi(self, create_sfc_port_chain):
-        self._test_new_sfp(create_sfc_port_chain, True, 25)
-
-    @mock.patch.object(Client, 'create_sfc_port_chain')
-    def test_new_sfp_default_sfc_encap_spi(self, create_sfc_port_chain):
-        self._test_new_sfp(create_sfc_port_chain, None, 25)
-
-    @mock.patch.object(Client, 'list_sfc_flow_classifiers')
-    def test_get_classification_list(self, list_sfc_flow_classifiers):
-        # what OpenStack is assumed to return to the VIM connector
-        list_sfc_flow_classifiers.return_value = {'flow_classifiers': [
-            {'source_port_range_min': 2000,
-             'destination_ip_prefix': '192.168.3.0/24',
-             'protocol': 'udp',
-             'description': '',
-             'ethertype': 'IPv4',
-             'l7_parameters': {},
-             'source_port_range_max': 2000,
-             'destination_port_range_min': 3000,
-             'source_ip_prefix': '192.168.2.0/24',
-             'logical_destination_port': None,
-             'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-             'destination_port_range_max': None,
-             'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-             'logical_source_port': 'aaab0ab0-1452-4636-bb3b-11dca833fa2b',
-             'id': '22198366-d4e8-4d6b-b4d2-637d5d6cbb7d',
-             'name': 'fc1'}]}
-
-        # call the VIM connector
-        filter_dict = {'protocol': 'tcp', 'ethertype': 'IPv4'}
-        result = self.vimconn.get_classification_list(filter_dict.copy())
-
-        # assert that VIM connector called OpenStack with the expected filter
-        list_sfc_flow_classifiers.assert_called_with(**filter_dict)
-        # assert that the VIM connector successfully
-        # translated and returned the OpenStack result
-        self.assertEqual(result, [
-            {'id': '22198366-d4e8-4d6b-b4d2-637d5d6cbb7d',
-             'name': 'fc1',
-             'description': '',
-             'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-             'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-             'ctype': 'legacy_flow_classifier',
-             'definition': {
-                 'source_port_range_min': 2000,
-                 'destination_ip_prefix': '192.168.3.0/24',
-                 'protocol': 'udp',
-                 'ethertype': 'IPv4',
-                 'l7_parameters': {},
-                 'source_port_range_max': 2000,
-                 'destination_port_range_min': 3000,
-                 'source_ip_prefix': '192.168.2.0/24',
-                 'logical_destination_port': None,
-                 'destination_port_range_max': None,
-                 'logical_source_port': 'aaab0ab0-1452-4636-bb3b-11dca833fa2b'}
-             }])
-
-    def _test_get_sfi_list(self, list_port_pair, correlation, sfc_encap):
-        # what OpenStack is assumed to return to the VIM connector
-        list_port_pair.return_value = {'port_pairs': [
-            {'ingress': '5311c75d-d718-4369-bbda-cdcc6da60fcc',
-             'description': '',
-             'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-             'egress': '5311c75d-d718-4369-bbda-cdcc6da60fcc',
-             'service_function_parameters': {'correlation': correlation},
-             'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-             'id': 'c121ebdd-7f2d-4213-b933-3325298a6966',
-             'name': 'osm_sfi'}]}
-
-        # call the VIM connector
-        filter_dict = {'name': 'osm_sfi', 'description': ''}
-        result = self.vimconn.get_sfi_list(filter_dict.copy())
-
-        # assert that VIM connector called OpenStack with the expected filter
-        list_port_pair.assert_called_with(**filter_dict)
-        # assert that the VIM connector successfully
-        # translated and returned the OpenStack result
-        self.assertEqual(result, [
-            {'ingress_ports': ['5311c75d-d718-4369-bbda-cdcc6da60fcc'],
-             'description': '',
-             'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-             'egress_ports': ['5311c75d-d718-4369-bbda-cdcc6da60fcc'],
-             'sfc_encap': sfc_encap,
-             'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-             'id': 'c121ebdd-7f2d-4213-b933-3325298a6966',
-             'name': 'osm_sfi'}])
-
-    @mock.patch.object(Client, 'list_sfc_port_pairs')
-    def test_get_sfi_list_with_sfc_encap(self, list_sfc_port_pairs):
-        self._test_get_sfi_list(list_sfc_port_pairs, 'nsh', True)
-
-    @mock.patch.object(Client, 'list_sfc_port_pairs')
-    def test_get_sfi_list_without_sfc_encap(self, list_sfc_port_pairs):
-        self._test_get_sfi_list(list_sfc_port_pairs, None, False)
-
-    @mock.patch.object(Client, 'list_sfc_port_pair_groups')
-    def test_get_sf_list(self, list_sfc_port_pair_groups):
-        # what OpenStack is assumed to return to the VIM connector
-        list_sfc_port_pair_groups.return_value = {'port_pair_groups': [
-            {'port_pairs': ['08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2',
-                            '0d63799c-82d6-11e7-8deb-a746bb3ae9f5'],
-             'description': '',
-             'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-             'port_pair_group_parameters': {},
-             'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-             'id': 'f4a0bde8-82d5-11e7-90e1-a72b762fa27f',
-             'name': 'osm_sf'}]}
-
-        # call the VIM connector
-        filter_dict = {'name': 'osm_sf', 'description': ''}
-        result = self.vimconn.get_sf_list(filter_dict.copy())
-
-        # assert that VIM connector called OpenStack with the expected filter
-        list_sfc_port_pair_groups.assert_called_with(**filter_dict)
-        # assert that the VIM connector successfully
-        # translated and returned the OpenStack result
-        self.assertEqual(result, [
-            {'sfis': ['08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2',
-                           '0d63799c-82d6-11e7-8deb-a746bb3ae9f5'],
-             'description': '',
-             'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-             'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-             'id': 'f4a0bde8-82d5-11e7-90e1-a72b762fa27f',
-             'name': 'osm_sf'}])
-
-    def _test_get_sfp_list(self, list_sfc_port_chains, correlation, sfc_encap):
-        # what OpenStack is assumed to return to the VIM connector
-        list_sfc_port_chains.return_value = {'port_chains': [
-            {'port_pair_groups': ['7d8e3bf8-82d6-11e7-a032-8ff028839d25',
-                                  '7dc9013e-82d6-11e7-a5a6-a3a8d78a5518'],
-             'flow_classifiers': ['1333c2f4-82d7-11e7-a5df-9327f33d104e',
-                                  '1387ab44-82d7-11e7-9bb0-476337183905'],
-             'description': '',
-             'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-             'chain_parameters': {'correlation': correlation},
-             'chain_id': 40,
-             'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-             'id': '821bc9be-82d7-11e7-8ce3-23a08a27ab47',
-             'name': 'osm_sfp'}]}
-
-        # call the VIM connector
-        filter_dict = {'name': 'osm_sfp', 'description': ''}
-        result = self.vimconn.get_sfp_list(filter_dict.copy())
-
-        # assert that VIM connector called OpenStack with the expected filter
-        list_sfc_port_chains.assert_called_with(**filter_dict)
-        # assert that the VIM connector successfully
-        # translated and returned the OpenStack result
-        self.assertEqual(result, [
-            {'service_functions': ['7d8e3bf8-82d6-11e7-a032-8ff028839d25',
-                                   '7dc9013e-82d6-11e7-a5a6-a3a8d78a5518'],
-             'classifications': ['1333c2f4-82d7-11e7-a5df-9327f33d104e',
-                                 '1387ab44-82d7-11e7-9bb0-476337183905'],
-             'description': '',
-             'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-             'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-             'sfc_encap': sfc_encap,
-             'spi': 40,
-             'id': '821bc9be-82d7-11e7-8ce3-23a08a27ab47',
-             'name': 'osm_sfp'}])
-
-    @mock.patch.object(Client, 'list_sfc_port_chains')
-    def test_get_sfp_list_with_sfc_encap(self, list_sfc_port_chains):
-        self._test_get_sfp_list(list_sfc_port_chains, 'nsh', True)
-
-    @mock.patch.object(Client, 'list_sfc_port_chains')
-    def test_get_sfp_list_without_sfc_encap(self, list_sfc_port_chains):
-        self._test_get_sfp_list(list_sfc_port_chains, None, False)
-
-    @mock.patch.object(Client, 'list_sfc_flow_classifiers')
-    def test_get_classification(self, list_sfc_flow_classifiers):
-        # what OpenStack is assumed to return to the VIM connector
-        list_sfc_flow_classifiers.return_value = {'flow_classifiers': [
-            {'source_port_range_min': 2000,
-             'destination_ip_prefix': '192.168.3.0/24',
-             'protocol': 'udp',
-             'description': '',
-             'ethertype': 'IPv4',
-             'l7_parameters': {},
-             'source_port_range_max': 2000,
-             'destination_port_range_min': 3000,
-             'source_ip_prefix': '192.168.2.0/24',
-             'logical_destination_port': None,
-             'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-             'destination_port_range_max': None,
-             'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-             'logical_source_port': 'aaab0ab0-1452-4636-bb3b-11dca833fa2b',
-             'id': '22198366-d4e8-4d6b-b4d2-637d5d6cbb7d',
-             'name': 'fc1'}
-        ]}
-
-        # call the VIM connector
-        result = self.vimconn.get_classification(
-            '22198366-d4e8-4d6b-b4d2-637d5d6cbb7d')
-
-        # assert that VIM connector called OpenStack with the expected filter
-        list_sfc_flow_classifiers.assert_called_with(
-            id='22198366-d4e8-4d6b-b4d2-637d5d6cbb7d')
-        # assert that VIM connector successfully returned the OpenStack result
-        self.assertEqual(result,
-                         {'id': '22198366-d4e8-4d6b-b4d2-637d5d6cbb7d',
-                          'name': 'fc1',
-                          'description': '',
-                          'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-                          'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-                          'ctype': 'legacy_flow_classifier',
-                          'definition': {
-                              'source_port_range_min': 2000,
-                              'destination_ip_prefix': '192.168.3.0/24',
-                              'protocol': 'udp',
-                              'ethertype': 'IPv4',
-                              'l7_parameters': {},
-                              'source_port_range_max': 2000,
-                              'destination_port_range_min': 3000,
-                              'source_ip_prefix': '192.168.2.0/24',
-                              'logical_destination_port': None,
-                              'destination_port_range_max': None,
-                              'logical_source_port':
-                                  'aaab0ab0-1452-4636-bb3b-11dca833fa2b'}
-                          })
-
-    @mock.patch.object(Client, 'list_sfc_flow_classifiers')
-    def test_get_classification_many_results(self, list_sfc_flow_classifiers):
-        # what OpenStack is assumed to return to the VIM connector
-        list_sfc_flow_classifiers.return_value = {'flow_classifiers': [
-            {'source_port_range_min': 2000,
-             'destination_ip_prefix': '192.168.3.0/24',
-             'protocol': 'udp',
-             'description': '',
-             'ethertype': 'IPv4',
-             'l7_parameters': {},
-             'source_port_range_max': 2000,
-             'destination_port_range_min': 3000,
-             'source_ip_prefix': '192.168.2.0/24',
-             'logical_destination_port': None,
-             'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-             'destination_port_range_max': None,
-             'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-             'logical_source_port': 'aaab0ab0-1452-4636-bb3b-11dca833fa2b',
-             'id': '22198366-d4e8-4d6b-b4d2-637d5d6cbb7d',
-             'name': 'fc1'},
-            {'source_port_range_min': 1000,
-             'destination_ip_prefix': '192.168.3.0/24',
-             'protocol': 'udp',
-             'description': '',
-             'ethertype': 'IPv4',
-             'l7_parameters': {},
-             'source_port_range_max': 1000,
-             'destination_port_range_min': 3000,
-             'source_ip_prefix': '192.168.2.0/24',
-             'logical_destination_port': None,
-             'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-             'destination_port_range_max': None,
-             'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-             'logical_source_port': 'aaab0ab0-1452-4636-bb3b-11dca833fa2b',
-             'id': '3196bafc-82dd-11e7-a205-9bf6c14b0721',
-             'name': 'fc2'}
-        ]}
-
-        # call the VIM connector
-        self.assertRaises(vimconn.vimconnConflictException,
-                          self.vimconn.get_classification,
-                          '3196bafc-82dd-11e7-a205-9bf6c14b0721')
-
-        # assert the VIM connector called OpenStack with the expected filter
-        list_sfc_flow_classifiers.assert_called_with(
-            id='3196bafc-82dd-11e7-a205-9bf6c14b0721')
-
-    @mock.patch.object(Client, 'list_sfc_flow_classifiers')
-    def test_get_classification_no_results(self, list_sfc_flow_classifiers):
-        # what OpenStack is assumed to return to the VIM connector
-        list_sfc_flow_classifiers.return_value = {'flow_classifiers': []}
-
-        # call the VIM connector
-        self.assertRaises(vimconn.vimconnNotFoundException,
-                          self.vimconn.get_classification,
-                          '3196bafc-82dd-11e7-a205-9bf6c14b0721')
-
-        # assert the VIM connector called OpenStack with the expected filter
-        list_sfc_flow_classifiers.assert_called_with(
-            id='3196bafc-82dd-11e7-a205-9bf6c14b0721')
-
-    @mock.patch.object(Client, 'list_sfc_port_pairs')
-    def test_get_sfi(self, list_sfc_port_pairs):
-        # what OpenStack is assumed to return to the VIM connector
-        list_sfc_port_pairs.return_value = {'port_pairs': [
-            {'ingress': '5311c75d-d718-4369-bbda-cdcc6da60fcc',
-             'description': '',
-             'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-             'egress': '5311c75d-d718-4369-bbda-cdcc6da60fcc',
-             'service_function_parameters': {'correlation': 'nsh'},
-             'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-             'id': 'c121ebdd-7f2d-4213-b933-3325298a6966',
-             'name': 'osm_sfi1'},
-        ]}
-
-        # call the VIM connector
-        result = self.vimconn.get_sfi('c121ebdd-7f2d-4213-b933-3325298a6966')
-
-        # assert the VIM connector called OpenStack with the expected filter
-        list_sfc_port_pairs.assert_called_with(
-            id='c121ebdd-7f2d-4213-b933-3325298a6966')
-        # assert the VIM connector successfully returned the OpenStack result
-        self.assertEqual(result,
-                         {'ingress_ports': [
-                             '5311c75d-d718-4369-bbda-cdcc6da60fcc'],
-                          'egress_ports': [
-                              '5311c75d-d718-4369-bbda-cdcc6da60fcc'],
-                          'sfc_encap': True,
-                          'description': '',
-                          'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-                          'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-                          'id': 'c121ebdd-7f2d-4213-b933-3325298a6966',
-                          'name': 'osm_sfi1'})
-
-    @mock.patch.object(Client, 'list_sfc_port_pairs')
-    def test_get_sfi_many_results(self, list_sfc_port_pairs):
-        # what OpenStack is assumed to return to the VIM connector
-        list_sfc_port_pairs.return_value = {'port_pairs': [
-            {'ingress': '5311c75d-d718-4369-bbda-cdcc6da60fcc',
-             'description': '',
-             'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-             'egress': '5311c75d-d718-4369-bbda-cdcc6da60fcc',
-             'service_function_parameters': {'correlation': 'nsh'},
-             'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-             'id': 'c121ebdd-7f2d-4213-b933-3325298a6966',
-             'name': 'osm_sfi1'},
-            {'ingress': '5311c75d-d718-4369-bbda-cdcc6da60fcc',
-             'description': '',
-             'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-             'egress': '5311c75d-d718-4369-bbda-cdcc6da60fcc',
-             'service_function_parameters': {'correlation': 'nsh'},
-             'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-             'id': 'c0436d92-82db-11e7-8f9c-5fa535f1261f',
-             'name': 'osm_sfi2'}
-        ]}
-
-        # call the VIM connector
-        self.assertRaises(vimconn.vimconnConflictException,
-                          self.vimconn.get_sfi,
-                          'c0436d92-82db-11e7-8f9c-5fa535f1261f')
-
-        # assert that VIM connector called OpenStack with the expected filter
-        list_sfc_port_pairs.assert_called_with(
-            id='c0436d92-82db-11e7-8f9c-5fa535f1261f')
-
-    @mock.patch.object(Client, 'list_sfc_port_pairs')
-    def test_get_sfi_no_results(self, list_sfc_port_pairs):
-        # what OpenStack is assumed to return to the VIM connector
-        list_sfc_port_pairs.return_value = {'port_pairs': []}
-
-        # call the VIM connector
-        self.assertRaises(vimconn.vimconnNotFoundException,
-                          self.vimconn.get_sfi,
-                          'b22892fc-82d9-11e7-ae85-0fea6a3b3757')
-
-        # assert that VIM connector called OpenStack with the expected filter
-        list_sfc_port_pairs.assert_called_with(
-            id='b22892fc-82d9-11e7-ae85-0fea6a3b3757')
-
-    @mock.patch.object(Client, 'list_sfc_port_pair_groups')
-    def test_get_sf(self, list_sfc_port_pair_groups):
-        # what OpenStack is assumed to return to the VIM connector
-        list_sfc_port_pair_groups.return_value = {'port_pair_groups': [
-            {'port_pairs': ['08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2'],
-             'description': '',
-             'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-             'port_pair_group_parameters': {},
-             'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-             'id': 'aabba8a6-82d9-11e7-a18a-d3c7719b742d',
-             'name': 'osm_sf1'}
-        ]}
-
-        # call the VIM connector
-        result = self.vimconn.get_sf('b22892fc-82d9-11e7-ae85-0fea6a3b3757')
-
-        # assert that VIM connector called OpenStack with the expected filter
-        list_sfc_port_pair_groups.assert_called_with(
-            id='b22892fc-82d9-11e7-ae85-0fea6a3b3757')
-        # assert that VIM connector successfully returned the OpenStack result
-        self.assertEqual(result,
-                         {'description': '',
-                          'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-                          'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-                          'sfis': ['08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2'],
-                          'id': 'aabba8a6-82d9-11e7-a18a-d3c7719b742d',
-                          'name': 'osm_sf1'})
-
-    @mock.patch.object(Client, 'list_sfc_port_pair_groups')
-    def test_get_sf_many_results(self, list_sfc_port_pair_groups):
-        # what OpenStack is assumed to return to the VIM connector
-        list_sfc_port_pair_groups.return_value = {'port_pair_groups': [
-            {'port_pairs': ['08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2'],
-             'description': '',
-             'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-             'port_pair_group_parameters': {},
-             'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-             'id': 'aabba8a6-82d9-11e7-a18a-d3c7719b742d',
-             'name': 'osm_sf1'},
-            {'port_pairs': ['0d63799c-82d6-11e7-8deb-a746bb3ae9f5'],
-             'description': '',
-             'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-             'port_pair_group_parameters': {},
-             'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-             'id': 'b22892fc-82d9-11e7-ae85-0fea6a3b3757',
-             'name': 'osm_sf2'}
-        ]}
-
-        # call the VIM connector
-        self.assertRaises(vimconn.vimconnConflictException,
-                          self.vimconn.get_sf,
-                          'b22892fc-82d9-11e7-ae85-0fea6a3b3757')
-
-        # assert that VIM connector called OpenStack with the expected filter
-        list_sfc_port_pair_groups.assert_called_with(
-            id='b22892fc-82d9-11e7-ae85-0fea6a3b3757')
-
-    @mock.patch.object(Client, 'list_sfc_port_pair_groups')
-    def test_get_sf_no_results(self, list_sfc_port_pair_groups):
-        # what OpenStack is assumed to return to the VIM connector
-        list_sfc_port_pair_groups.return_value = {'port_pair_groups': []}
-
-        # call the VIM connector
-        self.assertRaises(vimconn.vimconnNotFoundException,
-                          self.vimconn.get_sf,
-                          'b22892fc-82d9-11e7-ae85-0fea6a3b3757')
-
-        # assert that VIM connector called OpenStack with the expected filter
-        list_sfc_port_pair_groups.assert_called_with(
-            id='b22892fc-82d9-11e7-ae85-0fea6a3b3757')
-
-    @mock.patch.object(Client, 'list_sfc_port_chains')
-    def test_get_sfp(self, list_sfc_port_chains):
-        # what OpenStack is assumed to return to the VIM connector
-        list_sfc_port_chains.return_value = {'port_chains': [
-            {'port_pair_groups': ['7d8e3bf8-82d6-11e7-a032-8ff028839d25'],
-             'flow_classifiers': ['1333c2f4-82d7-11e7-a5df-9327f33d104e'],
-             'description': '',
-             'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-             'chain_parameters': {'correlation': 'nsh'},
-             'chain_id': 40,
-             'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-             'id': '821bc9be-82d7-11e7-8ce3-23a08a27ab47',
-             'name': 'osm_sfp1'}]}
-
-        # call the VIM connector
-        result = self.vimconn.get_sfp('821bc9be-82d7-11e7-8ce3-23a08a27ab47')
-
-        # assert that VIM connector called OpenStack with the expected filter
-        list_sfc_port_chains.assert_called_with(
-            id='821bc9be-82d7-11e7-8ce3-23a08a27ab47')
-        # assert that VIM connector successfully returned the OpenStack result
-        self.assertEqual(result,
-                         {'service_functions': [
-                             '7d8e3bf8-82d6-11e7-a032-8ff028839d25'],
-                          'classifications': [
-                              '1333c2f4-82d7-11e7-a5df-9327f33d104e'],
-                          'description': '',
-                          'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-                          'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-                          'sfc_encap': True,
-                          'spi': 40,
-                          'id': '821bc9be-82d7-11e7-8ce3-23a08a27ab47',
-                          'name': 'osm_sfp1'})
-
-    @mock.patch.object(Client, 'list_sfc_port_chains')
-    def test_get_sfp_many_results(self, list_sfc_port_chains):
-        # what OpenStack is assumed to return to the VIM connector
-        list_sfc_port_chains.return_value = {'port_chains': [
-            {'port_pair_groups': ['7d8e3bf8-82d6-11e7-a032-8ff028839d25'],
-             'flow_classifiers': ['1333c2f4-82d7-11e7-a5df-9327f33d104e'],
-             'description': '',
-             'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-             'chain_parameters': {'correlation': 'nsh'},
-             'chain_id': 40,
-             'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-             'id': '821bc9be-82d7-11e7-8ce3-23a08a27ab47',
-             'name': 'osm_sfp1'},
-            {'port_pair_groups': ['7d8e3bf8-82d6-11e7-a032-8ff028839d25'],
-             'flow_classifiers': ['1333c2f4-82d7-11e7-a5df-9327f33d104e'],
-             'description': '',
-             'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-             'chain_parameters': {'correlation': 'nsh'},
-             'chain_id': 50,
-             'project_id': '8f3019ef06374fa880a0144ad4bc1d7b',
-             'id': '5d002f38-82de-11e7-a770-f303f11ce66a',
-             'name': 'osm_sfp2'}
-        ]}
-
-        # call the VIM connector
-        self.assertRaises(vimconn.vimconnConflictException,
-                          self.vimconn.get_sfp,
-                          '5d002f38-82de-11e7-a770-f303f11ce66a')
-
-        # assert that VIM connector called OpenStack with the expected filter
-        list_sfc_port_chains.assert_called_with(
-            id='5d002f38-82de-11e7-a770-f303f11ce66a')
-
-    @mock.patch.object(Client, 'list_sfc_port_chains')
-    def test_get_sfp_no_results(self, list_sfc_port_chains):
-        # what OpenStack is assumed to return to the VIM connector
-        list_sfc_port_chains.return_value = {'port_chains': []}
-
-        # call the VIM connector
-        self.assertRaises(vimconn.vimconnNotFoundException,
-                          self.vimconn.get_sfp,
-                          '5d002f38-82de-11e7-a770-f303f11ce66a')
-
-        # assert that VIM connector called OpenStack with the expected filter
-        list_sfc_port_chains.assert_called_with(
-            id='5d002f38-82de-11e7-a770-f303f11ce66a')
-
-    @mock.patch.object(Client, 'delete_sfc_flow_classifier')
-    def test_delete_classification(self, delete_sfc_flow_classifier):
-        result = self.vimconn.delete_classification(
-            '638f957c-82df-11e7-b7c8-132706021464')
-        delete_sfc_flow_classifier.assert_called_with(
-            '638f957c-82df-11e7-b7c8-132706021464')
-        self.assertEqual(result, '638f957c-82df-11e7-b7c8-132706021464')
-
-    @mock.patch.object(Client, 'delete_sfc_port_pair')
-    def test_delete_sfi(self, delete_sfc_port_pair):
-        result = self.vimconn.delete_sfi(
-            '638f957c-82df-11e7-b7c8-132706021464')
-        delete_sfc_port_pair.assert_called_with(
-            '638f957c-82df-11e7-b7c8-132706021464')
-        self.assertEqual(result, '638f957c-82df-11e7-b7c8-132706021464')
-
-    @mock.patch.object(Client, 'delete_sfc_port_pair_group')
-    def test_delete_sf(self, delete_sfc_port_pair_group):
-        result = self.vimconn.delete_sf('638f957c-82df-11e7-b7c8-132706021464')
-        delete_sfc_port_pair_group.assert_called_with(
-            '638f957c-82df-11e7-b7c8-132706021464')
-        self.assertEqual(result, '638f957c-82df-11e7-b7c8-132706021464')
-
-    @mock.patch.object(Client, 'delete_sfc_port_chain')
-    def test_delete_sfp(self, delete_sfc_port_chain):
-        result = self.vimconn.delete_sfp(
-            '638f957c-82df-11e7-b7c8-132706021464')
-        delete_sfc_port_chain.assert_called_with(
-            '638f957c-82df-11e7-b7c8-132706021464')
-        self.assertEqual(result, '638f957c-82df-11e7-b7c8-132706021464')
-
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/osm_ro/tests/test_vimconn_vmware.py b/osm_ro/tests/test_vimconn_vmware.py
deleted file mode 100755 (executable)
index 89ca36c..0000000
+++ /dev/null
@@ -1,980 +0,0 @@
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2016-2017 VMware Inc.
-# This file is part of ETSI OSM
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact:  osslegalrouting@vmware.com
-##
-
-
-from osm_ro.vimconn_vmware import vimconnector
-from osm_ro.vimconn import vimconnUnexpectedResponse,vimconnNotFoundException,vimconnException
-from pyvcloud.vcd.client import Client
-from lxml import etree as lxmlElementTree
-from pyvcloud.vcd.org import Org
-from pyvcloud.vcd.vdc import VDC
-from pyvcloud.vcd.vapp import VApp
-import os
-import unittest
-import mock
-import test_vimconn_vmware_xml_response as xml_resp
-from os import path
-
-__author__ = "Prakash Kasar"
-
-class TestVimconn_VMware(unittest.TestCase):
-    def setUp(self):
-        config = { "admin_password": "admin",
-                  "admin_username":"user",
-                  "nsx_user": "nsx",
-                  "nsx_password": "nsx",
-                  "nsx_manager":"https://test-nsx" }
-
-        self.client = Client('test', verify_ssl_certs=False)
-
-        # get vcd org object
-        org_resp = xml_resp.org_xml_response
-        get_org = lxmlElementTree.fromstring(org_resp)
-        self.org = Org(self.client, resource=get_org)
-
-        self.vim = vimconnector(uuid='12354',
-                                 name='test',
-                         tenant_id='abc1234',
-                          tenant_name='test',
-                          url='https://test',
-                               config=config)
-
-
-    @mock.patch.object(vimconnector,'get_vdc_details')
-    @mock.patch.object(vimconnector,'connect')
-    @mock.patch.object(vimconnector,'perform_request')
-    def test_get_network_not_found(self, perform_request, connect, get_vdc_details):
-        """
-        Testcase to get network with invalid network id
-        """
-        # created vdc object
-        vdc_xml_resp = xml_resp.vdc_xml_response
-        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
-
-        # assumed return value from VIM connector
-        get_vdc_details.return_value = self.org, vdc
-        self.vim.client = self.vim.connect()
-        perform_request.return_value.status_code = 200
-        perform_request.return_value.content = xml_resp.vdc_xml_response
-
-        # call to VIM connector method with invalid id
-        self.assertRaises(vimconnNotFoundException,self.vim.get_network,'mgmt-net')
-
-    @mock.patch.object(vimconnector,'perform_request')
-    @mock.patch.object(vimconnector,'get_vdc_details')
-    @mock.patch.object(vimconnector,'connect')
-    def test_get_network(self, connect, get_vdc_details, perform_request):
-        """
-        Testcase to get network with valid network id
-        """
-        net_id = '5c04dc6d-6096-47c6-b72b-68f19013d491'
-        # created vdc object
-        vdc_xml_resp = xml_resp.vdc_xml_response
-        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
-
-        # assumed return value from VIM connector
-        get_vdc_details.return_value = self.org, vdc
-        self.vim.client = self.vim.connect()
-        perform_request.side_effect = [mock.Mock(status_code = 200,
-                                       content = xml_resp.vdc_xml_response),
-                                       mock.Mock(status_code = 200,
-                                       content = xml_resp.network_xml_response)]
-        # call to VIM connector method with network_id
-        result = self.vim.get_network(net_id)
-
-        # assert verified expected and return result from VIM connector
-        self.assertEqual(net_id, result['id'])
-
-    @mock.patch.object(vimconnector,'perform_request')
-    @mock.patch.object(vimconnector,'get_vdc_details')
-    @mock.patch.object(vimconnector,'connect')
-    def test_get_network_list_not_found(self, connect, get_vdc_details, perform_request):
-        """
-        Testcase to get list of available networks by invalid network id
-        """
-        vdc_xml_resp = xml_resp.vdc_xml_response
-        network_xml_resp = xml_resp.network_xml_response
-        # created vdc object
-        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
-
-        # assumed return value from VIM connector
-        get_vdc_details.return_value = self.org, vdc
-        self.vim.client = self.vim.connect()
-        perform_request.return_value.status_code = 200
-        perform_request.return_value.content = network_xml_resp
-
-        # call to VIM connector method with network_id
-        result = self.vim.get_network_list({'id':'45hdfg-345nb-345'})
-
-        # assert verified expected and return result from VIM connector
-        self.assertEqual(list(), result)
-
-    @mock.patch.object(vimconnector,'perform_request')
-    @mock.patch.object(vimconnector,'get_vdc_details')
-    @mock.patch.object(vimconnector,'connect')
-    def test_get_network_list(self, connect, get_vdc_details, perform_request):
-        """
-        Testcase to get list of available networks by valid network id
-        """
-        #import pdb;pdb.set_trace() ## Not working
-        vdc_xml_resp = xml_resp.vdc_xml_response
-        net_id = '5c04dc6d-6096-47c6-b72b-68f19013d491'
-        # created vdc object
-        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
-        # created network object
-        network_xml_resp = xml_resp.network_xml_response
-        # assumed return value from VIM connector
-        get_vdc_details.return_value = self.org, vdc
-        self.vim.client = self.vim.connect()
-        perform_request.side_effect = [mock.Mock(status_code = 200,
-                                       content = xml_resp.vdc_xml_response),
-                                       mock.Mock(status_code = 200,
-                                       content = network_xml_resp)]
-        perform_request.reset_mock()
-        perform_request()
-
-        # call to VIM connector method with network_id
-        result = self.vim.get_network_list({'id': net_id})
-
-        # assert verified expected and return result from VIM connector
-        for item in result:
-            self.assertEqual(item.get('id'), net_id)
-            self.assertEqual(item.get('status'), 'ACTIVE')
-            self.assertEqual(item.get('shared'), False)
-
-    @mock.patch.object(vimconnector,'create_network_rest')
-    def test_new_network(self, create_network_rest):
-        """
-        Testcase to create new network by passing network name and type
-        """
-        # create network reposnse
-        create_net_xml_resp = xml_resp.create_network_xml_response
-        net_name = 'Test_network'
-        net_type = 'bridge'
-        # assumed return value from VIM connector
-        create_network_rest.return_value = create_net_xml_resp
-        # call to VIM connector method with network name and type
-        result = self.vim.new_network(net_name, net_type)
-
-        # assert verified expected and return result from VIM connector
-        self.assertEqual(result, 'df1956fa-da04-419e-a6a2-427b6f83788f')
-
-    @mock.patch.object(vimconnector, 'create_network_rest')
-    def test_new_network_not_created(self, create_network_rest):
-        """
-        Testcase to create new network by assigning empty xml data
-        """
-        # assumed return value from VIM connector
-        create_network_rest.return_value = """<?xml version="1.0" encoding="UTF-8"?>
-                                              <OrgVdcNetwork></OrgVdcNetwork>"""
-
-        # assert verified expected and return result from VIM connector
-        self.assertRaises(vimconnUnexpectedResponse,self.vim.new_network,
-                                                              'test_net',
-                                                                'bridge')
-
-    @mock.patch.object(vimconnector, 'connect')
-    @mock.patch.object(vimconnector, 'get_network_action')
-    @mock.patch.object(vimconnector, 'delete_network_action')
-    def test_delete_network(self, delete_network_action, get_network_action, connect):
-        """
-        Testcase to delete network by network id
-        """
-        net_uuid = '0a55e5d1-43a2-4688-bc92-cb304046bf87'
-        # delete network response
-        delete_net_xml_resp = xml_resp.delete_network_xml_response
-
-        # assumed return value from VIM connector
-        self.vim.client = self.vim.connect()
-        get_network_action.return_value = delete_net_xml_resp
-        delete_network_action.return_value = True
-        # call to VIM connector method with network_id
-        result = self.vim.delete_network(net_uuid)
-
-        # assert verified expected and return result from VIM connector
-        self.assertEqual(result, net_uuid)
-
-    @mock.patch.object(vimconnector, 'get_vcd_network')
-    def test_delete_network_not_found(self, get_vcd_network):
-        """
-        Testcase to delete network by invalid network id
-        """
-        # assumed return value from VIM connector
-        get_vcd_network.return_value = False
-        # assert verified expected and return result from VIM connector
-        self.assertRaises(vimconnNotFoundException,self.vim.delete_network,
-                                    '2a23e5d1-42a2-0648-bc92-cb508046bf87')
-
-    def test_get_flavor(self):
-        """
-        Testcase to get flavor data
-        """
-        flavor_data = {'a646eb8a-95bd-4e81-8321-5413ee72b62e': {'disk': 10,
-                                                                'vcpus': 1,
-                                                               'ram': 1024}}
-        vimconnector.flavorlist = flavor_data
-        result = self.vim.get_flavor('a646eb8a-95bd-4e81-8321-5413ee72b62e')
-
-        # assert verified expected and return result from VIM connector
-        self.assertEqual(result, flavor_data['a646eb8a-95bd-4e81-8321-5413ee72b62e'])
-
-    def test_get_flavor_not_found(self):
-        """
-        Testcase to get flavor data with invalid id
-        """
-        vimconnector.flavorlist = {}
-        # assert verified expected and return result from VIM connector
-        self.assertRaises(vimconnNotFoundException,self.vim.get_flavor,
-                                'a646eb8a-95bd-4e81-8321-5413ee72b62e')
-
-    def test_new_flavor(self):
-        """
-        Testcase to create new flavor data
-        """
-        flavor_data = {'disk': 10, 'vcpus': 1, 'ram': 1024}
-        result = self.vim.new_flavor(flavor_data)
-        # assert verified expected and return result from VIM connector
-        self.assertIsNotNone(result)
-
-    def test_delete_flavor(self):
-        """
-        Testcase to delete flavor data
-        """
-        flavor_data = {'2cb3dffb-5c51-4355-8406-28553ead28ac': {'disk': 10,
-                                                                'vcpus': 1,
-                                                               'ram': 1024}}
-        vimconnector.flavorlist = flavor_data
-        # return value from VIM connector
-        result = self.vim.delete_flavor('2cb3dffb-5c51-4355-8406-28553ead28ac')
-
-        # assert verified expected and return result from VIM connector
-        self.assertEqual(result, '2cb3dffb-5c51-4355-8406-28553ead28ac')
-
-    @mock.patch.object(vimconnector,'connect_as_admin')
-    @mock.patch.object(vimconnector,'perform_request')
-    def test_delete_image_not_found(self, perform_request, connect_as_admin):
-        """
-        Testcase to delete image by invalid image id
-        """
-        # creating conn object
-        self.vim.client = self.vim.connect_as_admin()
-
-        # assumed return value from VIM connector
-        perform_request.side_effect = [mock.Mock(status_code = 200,
-                                       content = xml_resp.delete_catalog_xml_response),
-                                       mock.Mock(status_code = 201,
-                                       content = xml_resp.delete_catalog_item_xml_response)
-                                       ]
-
-        # assert verified expected and return result from VIM connector
-        self.assertRaises(vimconnNotFoundException, self.vim.delete_image, 'invali3453')
-
-    @mock.patch.object(vimconnector,'get_vdc_details')
-    @mock.patch.object(vimconnector,'connect')
-    @mock.patch.object(Org,'list_catalogs')
-    def test_get_image_list(self, list_catalogs, connect, get_vdc_details):
-        """
-        Testcase to get image list by valid image id
-        """
-        # created vdc object
-        vdc_xml_resp = xml_resp.vdc_xml_response
-        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
-        self.vim.client = self.vim.connect()
-
-        # assumed return value from VIM connector
-        get_vdc_details.return_value = self.org, vdc
-        list_catalogs.return_value = [{'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-10-15T02:03:59.403-07:00', 'id': '34925a30-0f4a-4018-9759-0d6799063b51', 'name': 'Ubuntu_1nic'}, {'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'orgadmin', 'numberOfMedia': '1', 'creationDate': '2018-02-15T02:16:58.300-08:00', 'id': '4b94b67e-c2c6-49ec-b46c-3f35ba45ca4a', 'name': 'cirros034'}, {'isShared': 'true', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'true', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2018-01-26T02:09:12.387-08:00', 'id': 'b139ed82-7ca4-49fb-9882-5f841f59c890', 'name': 'Ubuntu_plugtest-1'}, {'isShared': 'true', 'numberOfVAppTemplates': '1', 'orgName': 'Org2', 'isPublished': 'false', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-06-18T21:33:16.430-07:00', 'id': 'b31e6973-86d2-404b-a522-b16846d099dc', 'name': 'Ubuntu_Cat'}, {'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'orgadmin', 'numberOfMedia': '0', 'creationDate': '2018-02-15T22:26:28.910-08:00', 'id': 'c3b56180-f980-4256-9109-a93168d73ff2', 'name': 'de4ffcf2ad21f1a5d0714d6b868e2645'}, {'isShared': 'false', 'numberOfVAppTemplates': '0', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-08-23T05:54:56.780-07:00', 'id': 'd0eb0b02-718d-42e0-b889-56575000b52d', 'name': 'Test_Cirros'}, {'isShared': 'false', 'numberOfVAppTemplates': '0', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-03-08T21:25:05.923-08:00', 'id': 'd3fa3df2-b311-4571-9138-4c66541d7f46', 'name': 'cirros_10'}, {'isShared': 'false', 'numberOfVAppTemplates': '0', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'system', 
'numberOfMedia': '0', 'creationDate': '2017-07-12T22:45:20.537-07:00', 'id': 'd64b2617-ea4b-4b90-910b-102c99dd2031', 'name': 'Ubuntu16'}, {'isShared': 'true', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'true', 'ownerName': 'system', 'numberOfMedia': '1', 'creationDate': '2017-10-14T23:52:37.260-07:00', 'id': 'e8d953db-8dc9-46d5-9cab-329774cd2ad9', 'name': 'Ubuntu_no_nic'}]
-
-        result = self.vim.get_image_list({'id': '4b94b67e-c2c6-49ec-b46c-3f35ba45ca4a'})
-
-        # assert verified expected and return result from VIM connector
-        for item in result:
-            self.assertEqual(item['id'], '4b94b67e-c2c6-49ec-b46c-3f35ba45ca4a')
-
-    @mock.patch.object(vimconnector,'get_vapp_details_rest')
-    @mock.patch.object(vimconnector,'get_vdc_details')
-    def test_get_vminstance(self, get_vdc_details, get_vapp_details_rest):
-        """
-        Testcase to get vminstance by valid vm id
-        """
-        vapp_info = {'status': '4',
-                   'acquireMksTicket': {'href': 'https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/screen/action/acquireMksTicket',
-                   'type': 'application/vnd.vmware.vcloud.mksTicket+xml', 'rel': 'screen:acquireMksTicket'},
-                   'vm_virtual_hardware': {'disk_edit_href': 'https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/disks', 'disk_size': '40960'},
-                   'name': 'Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa',
-                   'created': '2017-09-21T01:15:31.627-07:00',
-                    'IsEnabled': 'true',
-                   'EndAddress': '12.16.24.199',
-                   'interfaces': [{'MACAddress': '00:50:56:01:12:a2',
-                                   'NetworkConnectionIndex': '0',
-                                   'network': 'testing_T6nODiW4-68f68d93-0350-4d86-b40b-6e74dedf994d',
-                                   'IpAddressAllocationMode': 'DHCP',
-                                   'IsConnected': 'true',
-                                   'IpAddress': '12.16.24.200'}],
-                   'ovfDescriptorUploaded': 'true',
-                   'nestedHypervisorEnabled': 'false',
-                   'Gateway': '12.16.24.1',
-                   'acquireTicket': {'href': 'https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/screen/action/acquireTicket',
-                   'rel': 'screen:acquireTicket'},
-                   'vmuuid': '47d12505-5968-4e16-95a7-18743edb0c8b',
-                   'Netmask': '255.255.255.0',
-                   'StartAddress': '12.16.24.100',
-                   'primarynetwork': '0',
-                   'networkname': 'External-Network-1074',
-                   'IsInherited': 'false',
-                   'deployed': 'true'} 
-        # created vdc object
-        vdc_xml_resp = xml_resp.vdc_xml_response
-        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
-        # assumed return value from VIM connector
-        get_vdc_details.return_value = self.org, vdc
-        get_vapp_details_rest.return_value = vapp_info
-
-        result = self.vim.get_vminstance('47d12505-5968-4e16-95a7-18743edb0c8b')
-        # assert verified expected and return result from VIM connector
-        self.assertEqual(result['status'], 'ACTIVE')
-        self.assertEqual(result['hostId'], '47d12505-5968-4e16-95a7-18743edb0c8b')
-
-
-    @mock.patch.object(vimconnector,'connect')
-    @mock.patch.object(vimconnector,'get_namebyvappid')
-    @mock.patch.object(vimconnector,'get_vdc_details')
-    @mock.patch.object(VDC,'get_vapp')
-    @mock.patch.object(VApp,'power_off')
-    @mock.patch.object(VApp,'undeploy')
-    @mock.patch.object(VDC,'delete_vapp')
-    @mock.patch.object(Client,'get_task_monitor')
-    def test_delete_vminstance(self, get_task_monitor, delete_vapp,
-                                               undeploy, poweroff,
-                                         get_vapp, get_vdc_details,
-                                        get_namebyvappid, connect):
-        """
-        Testcase to delete vminstance by valid vm id
-        """
-        vm_id = '4f6a9b49-e92d-4935-87a1-0e4dc9c3a069'
-        vm_name = 'Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa'
-        # created vdc object
-        vdc_xml_resp = xml_resp.vdc_xml_response
-        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
-
-        # assumed return value from VIM connector
-        self.vim.client = self.vim.connect()
-        get_vdc_details.return_value = self.org, vdc
-        get_namebyvappid.return_name = vm_name
-
-        vapp_resp = xml_resp.vapp_xml_response
-        vapp = lxmlElementTree.fromstring(vapp_resp)
-        get_vapp.return_value = vapp
-
-        power_off_resp = xml_resp.poweroff_task_xml
-        power_off = lxmlElementTree.fromstring(power_off_resp)
-        poweroff.return_value = power_off
-
-        status_resp = xml_resp.status_task_xml
-        status = lxmlElementTree.fromstring(status_resp)
-        self.vim.connect.return_value.get_task_monitor.return_value.wait_for_success.return_value = status
-
-        # call to VIM connector method
-        result = self.vim.delete_vminstance(vm_id)
-
-        # assert verified expected and return result from VIM connector
-        self.assertEqual(result, vm_id)
-
-    @mock.patch.object(vimconnector,'get_network_id_by_name')
-    @mock.patch.object(vimconnector,'get_vm_pci_details')
-    @mock.patch.object(VDC,'get_vapp')
-    @mock.patch.object(vimconnector,'connect')
-    @mock.patch.object(vimconnector,'get_namebyvappid')
-    @mock.patch.object(vimconnector,'get_vdc_details')
-    @mock.patch.object(vimconnector,'perform_request')
-    @mock.patch.object(VApp,'get_all_vms')
-    def test_refresh_vms_status(self, get_all_vms, perform_request, get_vdc_details,
-                                                          get_namebyvappid, connect,
-                                                       get_vapp, get_vm_pci_details,
-                                                            get_network_id_by_name):
-        """
-        Testcase to refresh vms status by valid vm id
-        """
-        vm_id = '53a529b2-10d8-4d56-a7ad-8182acdbe71c'
-
-        # created vdc object
-        vdc_xml_resp = xml_resp.vdc_xml_response
-        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
-        # assumed return value from VIM connector
-        self.vim.client = self.vim.connect()
-        get_vdc_details.return_value = self.org, vdc
-
-        get_namebyvappid.return_value = 'Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa'
-        get_vm_pci_details.return_value = {'host_name': 'test-esx-1.corp.local', 'host_ip': '12.19.24.31'}
-        vapp_resp = xml_resp.vapp_xml_response
-        vapp = lxmlElementTree.fromstring(vapp_resp)
-        get_vapp.return_value = vapp
-        get_network_id_by_name.return_value = '47d12505-5968-4e16-95a7-18743edb0c8b'
-
-        vm_resp = xml_resp.vm_xml_response
-        vm_list = lxmlElementTree.fromstring(vm_resp)
-        get_all_vms.return_value = vm_list
-
-        perform_request.return_value.status_code = 200
-        perform_request.return_value.content = vm_resp
-        # call to VIM connector method
-        result = self.vim.refresh_vms_status([vm_id])
-        for attr in result[vm_id]:
-            if attr == 'status':
-                # assert verified expected and return result from VIM connector
-                self.assertEqual(result[vm_id][attr], 'ACTIVE')
-
-    @mock.patch.object(vimconnector,'get_vcd_network')
-    def test_refresh_nets_status(self, get_vcd_network):
-        net_id = 'c2d0f28f-d38b-4588-aecc-88af3d4af58b'
-        network_dict = {'status': '1','isShared': 'false','IpScope': '',
-                        'EndAddress':'12.19.21.15',
-                        'name': 'testing_gwyRXlvWYL1-9ebb6d7b-5c74-472f-be77-963ed050d44d',
-                        'Dns1': '12.19.21.10', 'IpRanges': '',
-                        'Gateway': '12.19.21.23', 'Netmask': '255.255.255.0',
-                        'RetainNetInfoAcrossDeployments': 'false',
-                        'IpScopes': '', 'IsEnabled': 'true', 'DnsSuffix': 'corp.local',
-                        'StartAddress': '12.19.21.11', 'IpRange': '',
-                        'Configuration': '', 'FenceMode': 'bridged',
-                        'IsInherited': 'true', 'uuid': 'c2d0f28f-d38b-4588-aecc-88af3d4af58b'}
-        # assumed return value from VIM connector
-        get_vcd_network.return_value = network_dict
-        result = self.vim.refresh_nets_status([net_id])
-        # assert verified expected and return result from VIM connector
-        for attr in result[net_id]:
-            if attr == 'status':
-                self.assertEqual(result[net_id][attr], 'ACTIVE')
-
-    @mock.patch.object(VDC,'get_vapp')
-    @mock.patch.object(vimconnector,'connect')
-    @mock.patch.object(vimconnector,'get_namebyvappid')
-    @mock.patch.object(vimconnector,'get_vdc_details')
-    def test_action_vminstance(self, get_vdc_details, get_namebyvappid,
-                                                               connect,
-                                                             get_vapp):
-        """
-        Testcase for action vm instance by vm id
-        """
-        task_resp = xml_resp.poweroff_task_xml
-        vm_id = '05e6047b-6938-4275-8940-22d1ea7245b8'
-        # created vdc object
-        vdc_xml_resp = xml_resp.vdc_xml_response
-        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
-        # assumed return value from VIM connector
-        get_vdc_details.return_value = self.org, vdc
-        get_namebyvappid.return_value = 'Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa'
-        self.vim.client = self.vim.connect()
-        power_off_resp = xml_resp.poweroff_task_xml
-        power_off = lxmlElementTree.fromstring(power_off_resp)
-        get_vapp.return_value.undeploy.return_value = power_off
-
-        status_resp = xml_resp.status_task_xml
-        status = lxmlElementTree.fromstring(status_resp)
-        self.vim.connect.return_value.get_task_monitor.return_value.wait_for_success.return_value = status
-
-        # call to VIM connector method
-        result = self.vim.action_vminstance(vm_id,{'shutdown': None})
-
-        # assert verified expected and return result from VIM connector
-        self.assertEqual(result, vm_id)
-
-    @mock.patch.object(vimconnector,'get_org')
-    def test_get_tenant_list(self, get_org):
-        """
-        Test case for get tenant list
-        """
-        org_dict = {'catalogs': {'4c4fdb5d-0c7d-4fee-9efd-cb061f327a01': '80d8488f67ba1de98b7f485fba6abbd2', '1b98ca02-b0a6-4ca7-babe-eadc0ae59677': 'Ubuntu', 'e7f27dfe-14b7-49e1-918e-173bda02683a': '834bdd1f28fd15dcbe830456ec58fbca', '9441ee69-0486-4438-ac62-8d8082c51302': 'centos', 'e660cce0-47a6-4315-a5b9-97a39299a374': 'cirros01', '0fd96c61-c3d1-4abf-9a34-0dff8fb65743': 'cirros034', '1c703be3-9bd2-46a2-854c-3e678d5cdda8': 'Ubuntu_plugtest-1', 'bc4e342b-f84c-41bd-a93a-480f35bacf69': 'Cirros', '8a206fb5-3ef9-4571-9bcc-137615f4d930': '255eb079a62ac155e7f942489f14b0c4'}, 'vdcs': {'e6436c6a-d922-4b39-9c1c-b48e766fce5e': 'osm', '3852f762-18ae-4833-a229-42684b6e7373': 'cloud-1-vdc'}, 'networks': {'e203cacd-9320-4422-9be0-12c7def3ab56': 'testing_lNejr37B-38e4ca67-1e26-486f-ad2f-f14bb099e068', 'a6623349-2bef-4367-9fda-d33f9ab927f8': 'Vlan_3151', 'adf780cb-358c-47c2-858d-ae5778ccaf17': 'testing_xwBultc-99b8a2ae-c091-4dd3-bbf7-762a51612385', '721f9efc-11fe-4c13-936d-252ba0ed93c8': 'testing_tLljy8WB5e-a898cb28-e75b-4867-a22e-f2bad285c144', '1512d97a-929d-4b06-b8af-cf5ac42a2aee': 'Managment', 'd9167301-28af-4b89-b9e0-09f612e962fa': 'testing_prMW1VThk-063cb428-eaee-44b8-9d0d-df5fb77a5b4d', '004ae853-f899-43fd-8981-7513a3b40d6b': 'testing_RTtKVi09rld-fab00b16-7996-49af-8249-369c6bbfa02d'}}
-        tenant_name = 'osm'
-        get_org.return_value = org_dict
-
-        # call to VIM connector method
-        results = self.vim.get_tenant_list({'name' : tenant_name})
-        # assert verified expected and return result from VIM connector
-        for result in results:
-            self.assertEqual(tenant_name,result['name'])
-
-    @mock.patch.object(vimconnector,'get_org')
-    def test_get_tenant_list_negative(self, get_org):
-        """
-        Test case for get tenant list negative
-        """
-        org_dict = {'vdcs': {}}
-        tenant_name = 'testosm'
-        get_org.return_value = org_dict
-
-        # call to VIM connector method
-        results = self.vim.get_tenant_list({'name' : tenant_name})
-        # assert verified expected and return result from VIM connector
-        self.assertEqual(results, [])
-
-    @mock.patch.object(vimconnector,'create_vdc')
-    def test_new_tenant(self, create_vdc):
-        """
-        Test case for create new tenant
-        """
-        tenant_name = 'test'
-        vdc = {'a493aa2c-3104-4d63-969b-fc9e72304c9f': 'https://localhost/api/task/e658d84c-007d-4fd8-9590-3a8f93cc0de4'}
-        create_vdc.return_value = vdc
-
-        # call to VIM connector method
-        result = self.vim.new_tenant(tenant_name)
-        # assert verified expected and return result from VIM connector
-        self.assertEqual('a493aa2c-3104-4d63-969b-fc9e72304c9f', result)
-
-    @mock.patch.object(vimconnector,'create_vdc')
-    def test_new_tenant_negative(self, create_vdc):
-        """
-        Test case for create new tenant
-        """
-        tenant_name = 'test'
-        create_vdc.return_value = None
-
-        # assert verified expected and return result from VIM connector
-        self.assertRaises(vimconnException,self.vim.new_tenant,tenant_name)
-
-    @mock.patch.object(vimconnector,'connect_as_admin')
-    @mock.patch.object(vimconnector,'connect')
-    @mock.patch.object(vimconnector,'perform_request')
-    def test_delete_tenant(self, perform_request, connect, connect_as_admin):
-        """
-        Test case to delete tenant
-        """
-        tenant_id = '753227f5-d6c6-4478-9546-acc5cfff21e9'
-        delete_tenant_resp = xml_resp.delete_tenant
-
-        self.vim.client = self.vim.connect()
-        perform_request.side_effect = [mock.Mock(status_code = 200,
-                                       content = delete_tenant_resp),
-                                       mock.Mock(status_code = 202,
-                                       content = None)
-                                       ]
-
-        # call to VIM connector method
-        result = self.vim.delete_tenant(tenant_id)
-        # assert verified expected and return result from VIM connector
-        self.assertEqual(tenant_id, result)
-
-    @mock.patch.object(vimconnector,'connect_as_admin')
-    @mock.patch.object(vimconnector,'connect')
-    @mock.patch.object(vimconnector,'perform_request')
-    def test_delete_tenant_negative(self, perform_request, connect, connect_as_admin):
-        """
-        Test case to delete tenant
-        """
-        tenant_id = 'ten45klsjdf'
-
-        self.vim.client = self.vim.connect()
-        perform_request.return_value.status_code = 201
-
-        # assert verified expected and return result from VIM connector
-        self.assertRaises(vimconnNotFoundException,self.vim.delete_tenant,tenant_id)
-
-    @mock.patch.object(vimconnector,'get_vdc_details')
-    @mock.patch.object(Org,'list_catalogs')
-    @mock.patch.object(vimconnector,'get_vcd_network')
-    @mock.patch.object(Org,'get_vdc')
-    @mock.patch.object(Org,'get_catalog_item')
-    @mock.patch.object(vimconnector,'connect')
-    @mock.patch.object(vimconnector,'perform_request')
-    @mock.patch.object(Client,'get_task_monitor')
-    @mock.patch.object(VDC,'get_vapp')
-    @mock.patch.object(vimconnector,'get_network_list')
-    @mock.patch.object(vimconnector,'power_on_vapp')
-    def test_new_vminstance(self, power_on, get_network_list, get_vapp,
-                            get_task_monitor, perform_request, connect,
-                            get_catalog_item, get_vdc, get_vcd_network,
-                                       list_catalogs, get_vdc_details):
-        """
-        Test case for new vm instance
-        """
-        image_id = '34925a30-0f4a-4018-9759-0d6799063b51'
-        vimconnector.flavorlist = {'123347db-536b-4936-8b62-1fcdc721865d': {'vcpus': 1,
-                                                                            'disk': 10,
-                                                                            'ram': 1024}}
-
-        flavor_id = '123347db-536b-4936-8b62-1fcdc721865d'
-        net_list = [{'use': 'bridge', 'name': 'eth0', 'floating_ip': False, 'vpci': '0000:00:11.0', 'port_security': True, 'type': 'virtual', 'net_id': '69c713cb-3eec-452c-9a32-0e95c8ffe567'}]
-
-        cat_list = [{'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-10-15T02:03:59.403-07:00', 'id': '34925a30-0f4a-4018-9759-0d6799063b51', 'name': 'Ubuntu_1nic'}]
-
-        network_dict = {'status': '1', 'isShared': 'false', 'IpScope': '', 'EndAddress': '192.169.241.150', 'name': 'testing_6n5mJwUyx-ad9d62fc-8223-4dbe-88c4-9f16458ebeec', 'Dns1': '192.169.241.102', 'IpRanges': '', 'Gateway': '192.169.241.253', 'Netmask': '255.255.255.0', 'RetainNetInfoAcrossDeployments': 'false', 'IpScopes': '', 'IsEnabled': 'true', 'DnsSuffix': 'corp.local', 'StartAddress': '192.169.241.115', 'IpRange': '', 'Configuration': '', 'FenceMode': 'bridged', 'IsInherited': 'true', 'uuid': '69c713cb-3eec-452c-9a32-0e95c8ffe567'}
-
-        network_list = [{'status': 'ACTIVE', 'name': 'default', 'admin_state_up': True, 'shared': False, 'tenant_id': '2584137f-6541-4c04-a2a2-e56bfca14c69', 'type': 'bridge', 'id': '1fd6421e-929a-4576-bc19-a0c48aea1969'}]
-
-        # created vdc object
-        vdc_xml_resp = xml_resp.vdc_xml_response
-        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
-
-        catalog_list = lxmlElementTree.fromstring(xml_resp.catalog_list_xml)
-        # assumed return value from VIM connector
-        get_vdc_details.return_value = self.org, vdc
-        list_catalogs.return_value = cat_list
-        get_vcd_network.return_value = network_dict
-        get_vdc.return_value = vdc
-        get_catalog_item.return_value = catalog_list
-        self.vim.client = self.vim.connect()
-        perform_request.side_effect = [mock.Mock(status_code = 200,
-                                       content = xml_resp.catalogItem_xml),
-                                       mock.Mock(status_code = 200,
-                                       content = xml_resp.vapp_template_xml),
-                                       mock.Mock(status_code = 201,
-                                       content = xml_resp.deployed_vapp_xml)]
-
-        status_resp = xml_resp.status_task_xml
-        status = lxmlElementTree.fromstring(status_resp)
-        self.vim.connect.return_value.get_task_monitor.return_value.wait_for_success.return_value = status
-        vapp_resp = xml_resp.vapp_xml_response
-        vapp = lxmlElementTree.fromstring(vapp_resp)
-        get_vapp.return_value = vapp
-        get_network_list.return_value = network_list
-        power_on_resp = xml_resp.poweroff_task_xml
-        poweron = lxmlElementTree.fromstring(power_on_resp)
-        power_on.return_value = poweron
-
-        # call to VIM connector method
-        result = self.vim.new_vminstance(name='Test1_vm', image_id=image_id,
-                                                        flavor_id=flavor_id,
-                                                          net_list=net_list)
-        # assert verified expected and return result from VIM connector
-        self.assertIsNotNone(result)
-
-
-    @mock.patch.object(vimconnector,'get_vdc_details')
-    @mock.patch.object(Org,'list_catalogs')
-    @mock.patch.object(vimconnector,'get_vcd_network')
-    @mock.patch.object(Org,'get_vdc')
-    @mock.patch.object(Org,'get_catalog_item')
-    @mock.patch.object(vimconnector,'connect')
-    @mock.patch.object(vimconnector,'perform_request')
-    @mock.patch.object(Client,'get_task_monitor')
-    @mock.patch.object(VDC,'get_vapp')
-    @mock.patch.object(vimconnector,'get_network_list')
-    @mock.patch.object(vimconnector,'power_on_vapp')
-    def test_new_vminstance_negative(self, power_on, get_network_list, get_vapp,
-                            get_task_monitor, perform_request, connect,
-                            get_catalog_item, get_vdc, get_vcd_network,
-                                       list_catalogs, get_vdc_details):
-        """
-        Test case for new vm instance
-        """
-        image_id = '34925a30-0f4a-4018-9759-0d6799063b51'
-        vimconnector.flavorlist = {'123347db-536b-4936-8b62-1fcdc721865d': {'vcpus': 1,
-                                                                            'disk': 10,
-                                                                            'ram': 1024}}
-        flavor_id = '123347db-536b-4936-8b62-1fcdc721865d'
-        net_list = [{'use': 'bridge', 'name': 'eth0', 'floating_ip': False, 'vpci': '0000:00:11.0', 'port_security': True, 'type': 'virtual', 'net_id': '69c713cb-3eec-452c-9a32-0e95c8ffe567'}]
-
-        cat_list = [{'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-10-15T02:03:59.403-07:00', 'id': '34925a30-0f4a-4018-9759-0d6799063b51', 'name': 'Ubuntu_1nic'}]
-
-        network_dict = {'status': '1', 'isShared': 'false', 'IpScope': '', 'EndAddress': '192.169.241.150', 'name': 'testing_6n5mJwUyx-ad9d62fc-8223-4dbe-88c4-9f16458ebeec', 'Dns1': '192.169.241.102', 'IpRanges': '', 'Gateway': '192.169.241.253', 'Netmask': '255.255.255.0', 'RetainNetInfoAcrossDeployments': 'false', 'IpScopes': '', 'IsEnabled': 'true', 'DnsSuffix': 'corp.local', 'StartAddress': '192.169.241.115', 'IpRange': '', 'Configuration': '', 'FenceMode': 'bridged', 'IsInherited': 'true', 'uuid': '69c713cb-3eec-452c-9a32-0e95c8ffe567'}
-
-        # created vdc object
-        vdc_xml_resp = xml_resp.vdc_xml_response
-        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
-
-        catalog_list = lxmlElementTree.fromstring(xml_resp.catalog_list_xml)
-        # assumed return value from VIM connector
-        get_vdc_details.return_value = self.org, vdc
-        list_catalogs.return_value = cat_list
-        get_vcd_network.return_value = network_dict
-        get_vdc.return_value = vdc
-        get_catalog_item.return_value = catalog_list
-        self.vim.client = self.vim.connect()
-        perform_request.side_effect = [mock.Mock(status_code = 200,
-                                       content = xml_resp.catalogItem_xml),
-                                       mock.Mock(status_code = 200,
-                                       content = xml_resp.vapp_template_xml),
-                                       mock.Mock(status_code = 400,
-                                       content = "Bad request error")]
-
-        # call to VIM connector method
-        self.assertRaises(vimconnUnexpectedResponse,self.vim.new_vminstance,
-                                                                 name='Test1_vm',
-                                                                 image_id=image_id,
-                                                                 flavor_id=flavor_id,
-                                                                 net_list=net_list)
-
-    @mock.patch.object(vimconnector,'get_catalogid')
-    @mock.patch.object(vimconnector,'upload_vimimage')
-    @mock.patch.object(Org,'create_catalog')
-    @mock.patch.object(Org,'list_catalogs')
-    @mock.patch.object(vimconnector,'get_vdc_details')
-    @mock.patch.object(path,'isfile')
-    @mock.patch.object(os,'access')
-    def test_new_image(self, access, isfile,
-                              get_vdc_details,
-                                list_catalogs,
-                               create_catalog,
-                               upload_vimimage,
-                                get_catalogid):
-        """
-        Test case for create new image
-        """
-        path = '/tmp/cirros/cirros.ovf'
-        cat_list = [{'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-10-15T02:03:59.403-07:00', 'id': '9759-0d6799063b51', 'name': 'cirros'}]
-        # created vdc object
-        vdc_xml_resp = xml_resp.vdc_xml_response
-        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
-
-        catalog = lxmlElementTree.fromstring(xml_resp.catalog1_xml_response)
-
-        # assumed return value from VIM connector
-        isfile.return_value = True
-        access.return_value = True
-        get_vdc_details.return_value = self.org, vdc
-        list_catalogs.return_value = cat_list
-        create_catalog.return_value = catalog
-        upload_vimimage.return_value = True
-        get_catalogid.return_value = '9759-0d6799063b51'
-        result = self.vim.new_image({'name': 'TestImage', 'location' : path})
-
-        # assert verified expected and return result from VIM connector
-        self.assertIsNotNone(result)
-
-    @mock.patch.object(vimconnector,'get_catalogid')
-    @mock.patch.object(vimconnector,'upload_vimimage')
-    @mock.patch.object(Org,'create_catalog')
-    @mock.patch.object(Org,'list_catalogs')
-    @mock.patch.object(vimconnector,'get_vdc_details')
-    def test_new_image_negative(self, get_vdc_details, list_catalogs,
-                                              create_catalog,
-                                              upload_vimimage,
-                                              get_catalogid):
-        """
-        Test case for create new image with negative scenario
-        """
-        path = '/tmp/cirros/cirros.ovf'
-        cat_list = [{'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org1', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-10-15', 'id': '34925a30-0f4a-4018-9759-0d6799063b51', 'name': 'test'}]
-        # created vdc object
-        vdc_xml_resp = xml_resp.vdc_xml_response
-        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
-
-        catalog = lxmlElementTree.fromstring(xml_resp.catalog1_xml_response)
-
-        # assumed return value from VIM connector
-        get_vdc_details.return_value = self.org, vdc
-        list_catalogs.return_value = cat_list
-        create_catalog.return_value = catalog
-        upload_vimimage.return_value = False
-        get_catalogid.return_value = '34925a30-0f4a-4018-9759-0d6799063b51'
-
-        # assert verified expected and return result from VIM connector
-        self.assertRaises(vimconnException,self.vim.new_image,{'name':'TestImage', 'location':path})
-
-    @mock.patch.object(vimconnector,'connect_as_admin')
-    @mock.patch.object(vimconnector,'perform_request')
-    def test_delete_image(self, perform_request, connect_as_admin):
-        """
-        Testcase to delete image by image id
-        """
-        image_id = 'f3bf3733-465b-419f-b675-52f91d18edbb'
-        # creating conn object
-        self.vim.client = self.vim.connect_as_admin()
-
-        # assumed return value from VIM connector
-        perform_request.side_effect = [mock.Mock(status_code = 200,
-                                       content = xml_resp.delete_catalog_xml_response),
-                                       mock.Mock(status_code = 200,
-                                       content = xml_resp.delete_catalog_item_xml_response),
-                                       mock.Mock(status_code = 204,
-                                       content = ''),
-                                       mock.Mock(status_code = 204,
-                                       content = '')
-                                       ]
-
-        # call to vim connctor method
-        result = self.vim.delete_image(image_id)
-        # assert verified expected and return result from VIM connector
-        self.assertEqual(image_id, result)
-
-    @mock.patch.object(vimconnector,'get_catalogid')
-    @mock.patch.object(vimconnector,'upload_vimimage')
-    @mock.patch.object(Org,'create_catalog')
-    @mock.patch.object(Org,'list_catalogs')
-    @mock.patch.object(vimconnector,'get_vdc_details')
-    @mock.patch.object(path,'isfile')
-    @mock.patch.object(os,'access')
-    def test_get_image_id_from_path(self, access, isfile,
-                                              get_vdc_details,
-                                              list_catalogs,
-                                              create_catalog,
-                                              upload_vimimage,
-                                              get_catalogid):
-        """
-        Test case to get image id from image path
-        """
-        path = '/tmp/ubuntu/ubuntu.ovf'
-        cat_list = [{'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-10-15T02:03:59.403-07:00', 'id': '7208-0f6777052c30', 'name': 'ubuntu'}]
-
-        # created vdc object
-        vdc_xml_resp = xml_resp.vdc_xml_response
-        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
-
-        catalog = lxmlElementTree.fromstring(xml_resp.catalog1_xml_response)
-
-        # assumed return value from VIM connector
-        isfile.return_value = True
-        access.return_value = True
-        get_vdc_details.return_value = self.org, vdc
-        list_catalogs.return_value = cat_list
-        create_catalog.return_value = catalog
-        upload_vimimage.return_value = True
-        get_catalogid.return_value = '7208-0f6777052c30'
-        result = self.vim.get_image_id_from_path(path=path)
-
-        # assert verified expected and return result from VIM connector
-        self.assertIsNotNone(result)
-
-    @mock.patch.object(vimconnector,'get_catalogid')
-    @mock.patch.object(vimconnector,'upload_vimimage')
-    @mock.patch.object(Org,'create_catalog')
-    @mock.patch.object(Org,'list_catalogs')
-    @mock.patch.object(vimconnector,'get_vdc_details')
-    @mock.patch.object(path,'isfile')
-    @mock.patch.object(os,'access')
-    def test_get_image_id_from_path_negative(self, access, isfile,
-                                              get_vdc_details,
-                                              list_catalogs,
-                                              create_catalog,
-                                              upload_vimimage,
-                                              get_catalogid):
-        """
-        Test case to get image id from image path with negative scenario
-        """
-        path = '/tmp/ubuntu/ubuntu.ovf'
-        cat_list = [{'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-10-15T02:03:59.403-07:00', 'id': '7208-0f6777052c30', 'name': 'ubuntu'}]
-
-        # created vdc object
-        vdc_xml_resp = xml_resp.vdc_xml_response
-        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
-
-        catalog = lxmlElementTree.fromstring(xml_resp.catalog1_xml_response)
-
-        # assumed return value from VIM connector
-        isfile.return_value = True
-        access.return_value = True
-        get_vdc_details.return_value = self.org, vdc
-        list_catalogs.return_value = cat_list
-        create_catalog.return_value = catalog
-        upload_vimimage.return_value = False
-        get_catalogid.return_value = '7208-0f6777052c30'
-        self.assertRaises(vimconnException, self.vim.get_image_id_from_path, path)
-
-    @mock.patch.object(vimconnector,'get_vdc_details')
-    @mock.patch.object(vimconnector,'connect')
-    @mock.patch.object(Org,'list_catalogs')
-    def test_get_image_list_negative(self, list_catalogs, connect, get_vdc_details):
-        """
-        Testcase to get image list by invalid image id
-        """
-        # created vdc object
-        vdc_xml_resp = xml_resp.vdc_xml_response
-        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
-        self.vim.client = self.vim.connect()
-
-        # assumed return value from VIM connector
-        get_vdc_details.return_value = self.org, vdc
-        list_catalogs.return_value = [{'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-10-15T02:03:59.403-07:00', 'id': '34925a30-0f4a-4018-9759-0d6799063b51', 'name': 'Ubuntu_1nic'}, {'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'orgadmin', 'numberOfMedia': '1', 'creationDate': '2018-02-15T02:16:58.300-08:00', 'id': '4b94b67e-c2c6-49ec-b46c-3f35ba45ca4a', 'name': 'cirros034'}]
-
-        # call to vim connector method with invalid image id
-        self.vim.get_image_list({'id': 'b46c-3f35ba45ca4a'})
-
-    @mock.patch.object(vimconnector,'get_vapp_details_rest')
-    @mock.patch.object(vimconnector,'get_vdc_details')
-    def test_get_vminstance_negative(self, get_vdc_details, get_vapp_details_rest):
-        """
-        Testcase to get vminstance by invalid vm id
-        """
-
-        invalid_vmid = '18743edb0c8b-sdfsf-fg'
-        # created vdc object
-        vdc_xml_resp = xml_resp.vdc_xml_response
-        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
-        # assumed return value from VIM connector
-        get_vdc_details.return_value = self.org, vdc
-        get_vapp_details_rest.return_value = False
-
-        # assert verified expected and return result from VIM connector
-        self.assertRaises(vimconnNotFoundException, self.vim.get_vminstance,invalid_vmid)
-
-    @mock.patch.object(vimconnector,'connect')
-    @mock.patch.object(vimconnector,'get_namebyvappid')
-    @mock.patch.object(vimconnector,'get_vdc_details')
-    @mock.patch.object(VDC,'get_vapp')
-    def test_delete_vminstance_negative(self, get_vapp, get_vdc_details,
-                                             get_namebyvappid, connect):
-        """
-        Testcase to delete vminstance by invalid vm id
-        """
-        vm_id = 'sdfrtt4935-87a1-0e4dc9c3a069'
-        vm_name = 'Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa'
-        # created vdc object
-        vdc_xml_resp = xml_resp.vdc_xml_response
-        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
-
-        # assumed return value from VIM connector
-        self.vim.client = self.vim.connect()
-        get_vdc_details.return_value = self.org, vdc
-        get_namebyvappid.return_name = vm_name
-
-        get_vapp.return_value = None
-
-        # call to VIM connector method
-        self.assertRaises(vimconnException, self.vim.delete_vminstance,vm_id)
-
-    @mock.patch.object(vimconnector,'get_vcd_network')
-    def test_refresh_nets_status_negative(self, get_vcd_network):
-        """
-        Testcase for refresh nets status by invalid vm id
-        """
-        net_id = 'sjkldf-456mfd-345'
-
-        # assumed return value from VIM connector
-        get_vcd_network.return_value = None
-        result = self.vim.refresh_nets_status([net_id])
-
-        # assert verified expected and return result from VIM connector
-        for attr in result[net_id]:
-            if attr == 'status':
-                self.assertEqual(result[net_id][attr], 'DELETED')
-
-    @mock.patch.object(vimconnector,'connect')
-    @mock.patch.object(vimconnector,'get_namebyvappid')
-    @mock.patch.object(vimconnector,'get_vdc_details')
-    def test_action_vminstance_negative(self, get_vdc_details,
-                                             get_namebyvappid,
-                                                     connect):
-        """
-        Testcase for action vm instance by invalid action
-        """
-        vm_id = '8413-4cb8-bad7-b5afaec6f9fa'
-        # created vdc object
-        vdc_xml_resp = xml_resp.vdc_xml_response
-        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
-        # assumed return value from VIM connector
-        get_vdc_details.return_value = self.org, vdc
-        get_namebyvappid.return_value = 'Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa'
-        self.vim.client = self.vim.connect()
-
-        # call to VIM connector method
-        self.assertRaises(vimconnException, self.vim.action_vminstance, vm_id,{'invalid': None})
diff --git a/osm_ro/tests/test_vimconn_vmware_xml_response.py b/osm_ro/tests/test_vimconn_vmware_xml_response.py
deleted file mode 100644 (file)
index 968cb1f..0000000
+++ /dev/null
@@ -1,637 +0,0 @@
-# -*- coding: utf-8 -*-\r
-\r
-##\r
-# Copyright 2016-2017 VMware Inc.\r
-# This file is part of ETSI OSM\r
-# All Rights Reserved.\r
-#\r
-# Licensed under the Apache License, Version 2.0 (the "License"); you may\r
-# not use this file except in compliance with the License. You may obtain\r
-# a copy of the License at\r
-#\r
-#         http://www.apache.org/licenses/LICENSE-2.0\r
-#\r
-# Unless required by applicable law or agreed to in writing, software\r
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT\r
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\r
-# License for the specific language governing permissions and limitations\r
-# under the License.\r
-#\r
-# For those usages not covered by the Apache License, Version 2.0 please\r
-# contact:  osslegalrouting@vmware.com\r
-##\r
-\r
-vdc_xml_response = """<?xml version="1.0" encoding="UTF-8"?>\r
-        <Vdc xmlns="http://www.vmware.com/vcloud/v1.5" status="1" name="Org3-VDC-PVDC1" id="urn:vcloud:vdc:2584137f-6541-4c04-a2a2-e56bfca14c69" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69" type="application/vnd.vmware.vcloud.vdc+xml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">\r
-               <Link rel="up" href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac" type="application/vnd.vmware.vcloud.org+xml"/>\r
-               <Link rel="down" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>\r
-               <Link rel="edit" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69" type="application/vnd.vmware.vcloud.vdc+xml"/>\r
-               <Link rel="add" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/action/uploadVAppTemplate" type="application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml"/>\r
-               <Link rel="add" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/media" type="application/vnd.vmware.vcloud.media+xml"/>\r
-               <Link rel="add" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/action/instantiateOvf" type="application/vnd.vmware.vcloud.instantiateOvfParams+xml"/>\r
-               <Link rel="add" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/action/instantiateVAppTemplate" type="application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml"/>\r
-               <Link rel="add" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/action/cloneVApp" type="application/vnd.vmware.vcloud.cloneVAppParams+xml"/>\r
-               <Link rel="add" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/action/cloneVAppTemplate" type="application/vnd.vmware.vcloud.cloneVAppTemplateParams+xml"/>\r
-               <Link rel="add" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/action/cloneMedia" type="application/vnd.vmware.vcloud.cloneMediaParams+xml"/>\r
-               <Link rel="add" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/action/captureVApp" type="application/vnd.vmware.vcloud.captureVAppParams+xml"/>\r
-               <Link rel="add" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/action/composeVApp" type="application/vnd.vmware.vcloud.composeVAppParams+xml"/>\r
-               <Link rel="add" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/disk" type="application/vnd.vmware.vcloud.diskCreateParams+xml"/>\r
-               <Link rel="edgeGateways" href="https://localhost/api/admin/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/edgeGateways" type="application/vnd.vmware.vcloud.query.records+xml"/>\r
-               <Link rel="add" href="https://localhost/api/admin/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/networks" type="application/vnd.vmware.vcloud.orgVdcNetwork+xml"/>\r
-               <Link rel="orgVdcNetworks" href="https://localhost/api/admin/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/networks" type="application/vnd.vmware.vcloud.query.records+xml"/>\r
-               <Link rel="alternate" href="https://localhost/api/admin/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69" type="application/vnd.vmware.admin.vdc+xml"/>\r
-               <Description>Org3-VDC-PVDC1</Description>\r
-               <AllocationModel>AllocationVApp</AllocationModel>\r
-               <ComputeCapacity>\r
-               <Cpu>\r
-               <Units>MHz</Units>\r
-               <Allocated>0</Allocated>\r
-               <Limit>0</Limit>\r
-               <Reserved>0</Reserved>\r
-               <Used>2000</Used>\r
-               <Overhead>0</Overhead>\r
-               </Cpu>\r
-               <Memory>\r
-               <Units>MB</Units>\r
-               <Allocated>0</Allocated>\r
-               <Limit>0</Limit>\r
-               <Reserved>0</Reserved>\r
-               <Used>2048</Used>\r
-               <Overhead>71</Overhead>\r
-               </Memory>\r
-               </ComputeCapacity>\r
-               <ResourceEntities>\r
-               <ResourceEntity href="https://localhost/api/vAppTemplate/vappTemplate-2999a787-ca96-4d1c-8b7c-9d0a8bd14bce" name="cirros" type="application/vnd.vmware.vcloud.vAppTemplate+xml"/>\r
-        <ResourceEntity href="https://localhost/api/vAppTemplate/vappTemplate-324649a3-d263-4446-aace-4e2c801a85bd" name="cirros_10" type="application/vnd.vmware.vcloud.vAppTemplate+xml"/>\r
-               <ResourceEntity href="https://localhost/api/vAppTemplate/vappTemplate-8ea35d43-0c72-4267-bac9-42e4a5248c32" name="Test_Cirros" type="application/vnd.vmware.vcloud.vAppTemplate+xml"/>\r
-               <ResourceEntity href="https://localhost/api/vAppTemplate/vappTemplate-9bf292a2-58c4-4d4b-995b-623e88b74226" name="Ubuntu-vm" type="application/vnd.vmware.vcloud.vAppTemplate+xml"/>\r
-               <ResourceEntity href="https://localhost/api/vAppTemplate/vappTemplate-be93140e-da0d-4b8c-8ab4-06d132bf47c0" name="Ubuntu16" type="application/vnd.vmware.vcloud.vAppTemplate+xml"/>\r
-               <ResourceEntity href="https://localhost/api/vApp/vapp-0da5344d-4d65-4362-bac6-e8524c97edb1" name="Inst10.linux1.a-e9f75c31-eadf-4b48-9a5e-d957314530d7" type="application/vnd.vmware.vcloud.vApp+xml"/>\r
-               <ResourceEntity href="https://localhost/api/vApp/vapp-3e0df975-1380-4544-9f25-0683f9eb41f0" name="Inst12.linux1.a-93854e6d-d87c-4f0a-ba10-eaf59d7555bf" type="application/vnd.vmware.vcloud.vApp+xml"/>\r
-               <ResourceEntity href="https://localhost/api/vApp/vapp-6f5848b8-5498-4854-a35e-45cb25b8fdb0" name="Inst11.linux1.a-5ca666e8-e077-4268-aff2-99960af28eb5" type="application/vnd.vmware.vcloud.vApp+xml"/>\r
-               <ResourceEntity href="https://localhost/api/vApp/vapp-76510a06-c949-4bea-baad-629daaccb84a" name="cirros_nsd.cirros_vnfd__1.a-a9c957c4-29a5-4559-a630-00ae028592f7" type="application/vnd.vmware.vcloud.vApp+xml"/>\r
-               </ResourceEntities><AvailableNetworks><Network href="https://localhost/api/network/1627b438-68bf-44be-800c-8f48029761f6" name="default-17c27654-2a45-4713-a799-94cb91de2610" type="application/vnd.vmware.vcloud.network+xml"/>\r
-               <Network href="https://localhost/api/network/190e9e04-a904-412b-877e-92d8e8699abd" name="cirros_nsd.cirros_nsd_vld1-86c861a9-d985-4e31-9c20-21de1e8a619d" type="application/vnd.vmware.vcloud.network+xml"/>\r
-               <Network href="https://localhost/api/network/3838c23e-cb0e-492f-a91f-f3352918ff8b" name="cirros_nsd.cirros_nsd_vld1-75ce0375-b2e6-4b7f-b821-5b395276bcd8" type="application/vnd.vmware.vcloud.network+xml"/>\r
-               <Network href="https://localhost/api/network/5aca5c32-c0a2-4e1b-980e-8fd906a49f4e" name="default-60a54140-66dd-4806-8ca3-069d34530478" type="application/vnd.vmware.vcloud.network+xml"/>\r
-               <Network href="https://localhost/api/network/de854aa2-0b77-4ace-a696-85494a3dc3c4" name="default-971acee6-0298-4085-b107-7601bc8c8712" type="application/vnd.vmware.vcloud.network+xml"/>\r
-               </AvailableNetworks>\r
-               <Capabilities>\r
-               <SupportedHardwareVersions>\r
-               <SupportedHardwareVersion>vmx-04</SupportedHardwareVersion>\r
-               <SupportedHardwareVersion>vmx-07</SupportedHardwareVersion>\r
-               <SupportedHardwareVersion>vmx-08</SupportedHardwareVersion>\r
-               <SupportedHardwareVersion>vmx-09</SupportedHardwareVersion>\r
-               <SupportedHardwareVersion>vmx-10</SupportedHardwareVersion>\r
-               <SupportedHardwareVersion>vmx-11</SupportedHardwareVersion>\r
-               </SupportedHardwareVersions>\r
-               </Capabilities>\r
-               <NicQuota>0</NicQuota>\r
-               <NetworkQuota>1000</NetworkQuota>\r
-               <UsedNetworkCount>0</UsedNetworkCount>\r
-               <VmQuota>0</VmQuota>\r
-               <IsEnabled>true</IsEnabled>\r
-               <VdcStorageProfiles>\r
-               <VdcStorageProfile href="https://localhost/api/vdcStorageProfile/3b82941c-11ed-407e-ada0-42d282fcd425" name="NFS Storage Policy" type="application/vnd.vmware.vcloud.vdcStorageProfile+xml"/>\r
-               <VdcStorageProfile href="https://localhost/api/vdcStorageProfile/950701fb-2b8a-4808-80f1-27d1170a2bfc" name="*" type="application/vnd.vmware.vcloud.vdcStorageProfile+xml"/>\r
-               </VdcStorageProfiles>\r
-        <VCpuInMhz2>1000</VCpuInMhz2>\r
-        </Vdc>"""\r
-\r
-network_xml_response = """<?xml version="1.0" encoding="UTF-8"?>\r
-             <OrgVdcNetwork xmlns="http://www.vmware.com/vcloud/v1.5" status="1" name="testing_6XXftDTroat1-03b18565-de01-4154-af51-8dbea42f0d84" id="urn:vcloud:network:5c04dc6d-6096-47c6-b72b-68f19013d491" href="https://localhost/api/network/5c04dc6d-6096-47c6-b72b-68f19013d491" type="application/vnd.vmware.vcloud.orgVdcNetwork+xml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">\r
-             <Link rel="up" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69" type="application/vnd.vmware.vcloud.vdc+xml"/>\r
-             <Link rel="down" href="https://localhost/api/network/5c04dc6d-6096-47c6-b72b-68f19013d491/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>\r
-             <Link rel="down" href="https://localhost/api/network/5c04dc6d-6096-47c6-b72b-68f19013d491/allocatedAddresses/" type="application/vnd.vmware.vcloud.allocatedNetworkAddress+xml"/>\r
-             <Description>Openmano created</Description>\r
-             <Configuration>\r
-             <IpScopes>\r
-             <IpScope>\r
-             <IsInherited>true</IsInherited>\r
-             <Gateway>12.169.24.23</Gateway>\r
-             <Netmask>255.255.255.0</Netmask>\r
-             <Dns1>12.169.24.102</Dns1>\r
-             <DnsSuffix>corp.local</DnsSuffix>\r
-             <IsEnabled>true</IsEnabled>\r
-             <IpRanges>\r
-             <IpRange>\r
-             <StartAddress>12.169.24.115</StartAddress>\r
-             <EndAddress>12.169.241.150</EndAddress>\r
-             </IpRange>\r
-             </IpRanges>\r
-             </IpScope>\r
-             </IpScopes>\r
-             <FenceMode>bridged</FenceMode>\r
-             <RetainNetInfoAcrossDeployments>false</RetainNetInfoAcrossDeployments>\r
-             </Configuration>\r
-             <IsShared>false</IsShared>\r
-             </OrgVdcNetwork>"""\r
-\r
-delete_network_xml_response = """<?xml version="1.0" encoding="UTF-8"?>\r
-            <OrgVdcNetwork xmlns="http://www.vmware.com/vcloud/v1.5" status="1" name="testing_negjXxdlB-7fdcf9f3-de32-4ae6-b9f9-fb725a80a74f" id="urn:vcloud:network:0a55e5d1-43a2-4688-bc92-cb304046bf87" href="https://localhost/api/network/0a55e5d1-43a2-4688-bc92-cb304046bf87" type="application/vnd.vmware.vcloud.orgVdcNetwork+xml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">\r
-                       <Link rel="up" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69" type="application/vnd.vmware.vcloud.vdc+xml"/>\r
-                       <Link rel="down" href="https://localhost/api/network/0a55e5d1-43a2-4688-bc92-cb304046bf87/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>\r
-                       <Link rel="down" href="https://localhost/api/network/0a55e5d1-43a2-4688-bc92-cb304046bf87/allocatedAddresses/"  type="application/vnd.vmware.vcloud.allocatedNetworkAddress+xml"/>\r
-                       <Description>Openmano created</Description>\r
-                       <Configuration>\r
-                       <IpScopes>\r
-                       <IpScope>\r
-                       <IsInherited>true</IsInherited>\r
-                       <Gateway>12.169.24.23</Gateway>\r
-                       <Netmask>255.255.255.0</Netmask>\r
-                       <Dns1>12.169.24.102</Dns1>\r
-                       <DnsSuffix>corp.local</DnsSuffix>\r
-                       <IsEnabled>true</IsEnabled>\r
-                       <IpRanges>\r
-                       <IpRange>\r
-                       <StartAddress>12.169.241.115</StartAddress>\r
-                       <EndAddress>12.169.241.150</EndAddress>\r
-                       </IpRange></IpRanges></IpScope>\r
-                       </IpScopes>\r
-                       <FenceMode>bridged</FenceMode>\r
-                       <RetainNetInfoAcrossDeployments>false</RetainNetInfoAcrossDeployments>\r
-                       </Configuration>\r
-                       <IsShared>false</IsShared>\r
-                       </OrgVdcNetwork>"""\r
-\r
-create_network_xml_response = """<?xml version="1.0" encoding="UTF-8"?>\r
-            <OrgVdcNetwork xmlns="http://www.vmware.com/vcloud/v1.5" name="Test_network-25cb63aa-30e9-4de5-be76-1d6e00a2781a" id="urn:vcloud:network:df1956fa-da04-419e-a6a2-427b6f83788f" href="https://localhost/api/admin/network/df1956fa-da04-419e-a6a2-427b6f83788f" type="application/vnd.vmware.vcloud.orgVdcNetwork+xml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">\r
-            <Link rel="edit" href="https://localhost/api/admin/network/df1956fa-da04-419e-a6a2-427b6f83788f" type="application/vnd.vmware.vcloud.orgVdcNetwork+xml"/>\r
-            <Link rel="remove" href="https://localhost/api/admin/network/df1956fa-da04-419e-a6a2-427b6f83788f"/>\r
-            <Link rel="repair" href="https://localhost/api/admin/network/df1956fa-da04-419e-a6a2-427b6f83788f/action/reset"/>\r
-            <Link rel="up" href="https://localhost/api/admin/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69" type="application/vnd.vmware.admin.vdc+xml"/>\r
-            <Link rel="down" href="https://localhost/api/admin/network/df1956fa-da04-419e-a6a2-427b6f83788f/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>\r
-            <Link rel="down" href="https://localhost/api/admin/network/df1956fa-da04-419e-a6a2-427b6f83788f/allocatedAddresses/" type="application/vnd.vmware.vcloud.allocatedNetworkAddress+xml"/>\r
-            <Description>Openmano created</Description>\r
-            <Tasks>\r
-                  <Task cancelRequested="false" expiryTime="2017-12-14T02:00:39.865-08:00" operation="Creating Network Test_network-25cb63aa-30e9-4de5-be76-1d6e00a2781a(df1956fa-da04-419e-a6a2-427b6f83788f)" operationName="networkCreateOrgVdcNetwork" serviceNamespace="com.vmware.vcloud" startTime="2017-09-15T02:00:39.865-07:00" status="queued" name="task" id="urn:vcloud:task:0600f592-42ce-4d58-85c0-212c569ba6e6" href="https://localhost/api/task/0600f592-42ce-4d58-85c0-212c569ba6e6" type="application/vnd.vmware.vcloud.task+xml">\r
-                  <Owner href="https://localhost/api/admin/network/df1956fa-da04-419e-a6a2-427b6f83788f" name="Test_network-25cb63aa-30e9-4de5-be76-1d6e00a2781a" type="application/vnd.vmware.admin.network+xml"/>\r
-                  <User href="https://localhost/api/admin/user/f49f28e0-7172-4b17-aaee-d171ce2b60da" name="administrator" type="application/vnd.vmware.admin.user+xml"/>\r
-                  <Organization href="https://localhost/api/org/a93c9db9-7471-3192-8d09-a8f7eeda85f9" name="System" type="application/vnd.vmware.vcloud.org+xml"/>\r
-                  <Details/>\r
-                  </Task>\r
-            </Tasks>\r
-            <Configuration>\r
-            <IpScopes><IpScope>\r
-            <IsInherited>false</IsInherited>\r
-            <Gateway>12.16.113.1</Gateway>\r
-            <Netmask>255.255.255.0</Netmask>\r
-            <Dns1>12.16.113.2</Dns1>\r
-            <IsEnabled>true</IsEnabled>\r
-            <IpRanges><IpRange>\r
-            <StartAddress>12.168.113.3</StartAddress>\r
-            <EndAddress>12.168.113.52</EndAddress>\r
-            </IpRange></IpRanges>\r
-            </IpScope></IpScopes>\r
-            <ParentNetwork href="https://localhost/api/admin/network/19b01b42-c862-4d0f-bcbf-d053e7396fc0" name="" type="application/vnd.vmware.admin.network+xml"/>\r
-            <FenceMode>bridged</FenceMode>\r
-            <RetainNetInfoAcrossDeployments>false</RetainNetInfoAcrossDeployments>\r
-            </Configuration><IsShared>false</IsShared>\r
-            </OrgVdcNetwork>"""\r
-\r
-catalog1_xml_response = """<?xml version="1.0" encoding="UTF-8"?>\r
-<Catalog xmlns="http://www.vmware.com/vcloud/v1.5" name="Ubuntu-vm" id="urn:vcloud:catalog:d0a11b12-780e-4681-babb-2b1fd6693f62" href="https://localhost/api/catalog/d0a11b12-780e-4681-babb-2b1fd6693f62" type="application/vnd.vmware.vcloud.catalog+xml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">\r
-<Link rel="up" href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac" type="application/vnd.vmware.vcloud.org+xml"/>\r
-<Link rel="down" href="https://localhost/api/catalog/d0a11b12-780e-4681-babb-2b1fd6693f62/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>\r
-<Link rel="add" href="https://localhost/api/catalog/d0a11b12-780e-4681-babb-2b1fd6693f62/catalogItems" type="application/vnd.vmware.vcloud.catalogItem+xml"/>\r
-<Link rel="add" href="https://localhost/api/catalog/d0a11b12-780e-4681-babb-2b1fd6693f62/action/upload" type="application/vnd.vmware.vcloud.media+xml"/>\r
-<Link rel="add" href="https://localhost/api/catalog/d0a11b12-780e-4681-babb-2b1fd6693f62/action/upload" type="application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml"/>\r
-<Link rel="copy" href="https://localhost/api/catalog/d0a11b12-780e-4681-babb-2b1fd6693f62/action/copy" type="application/vnd.vmware.vcloud.copyOrMoveCatalogItemParams+xml"/>\r
-<Link rel="move" href="https://localhost/api/catalog/d0a11b12-780e-4681-babb-2b1fd6693f62/action/move" type="application/vnd.vmware.vcloud.copyOrMoveCatalogItemParams+xml"/>\r
-<Link rel="add" href="https://localhost/api/catalog/d0a11b12-780e-4681-babb-2b1fd6693f62/action/captureVApp" type="application/vnd.vmware.vcloud.captureVAppParams+xml"/>\r
-<Link rel="down" href="https://localhost/api/catalog/d0a11b12-780e-4681-babb-2b1fd6693f62/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
-<Link rel="controlAccess" href="https://localhost/api/catalog/d0a11b12-780e-4681-babb-2b1fd6693f62/action/controlAccess" type="application/vnd.vmware.vcloud.controlAccess+xml"/> <Description>Ubuntu-vm</Description>\r
-<CatalogItems><CatalogItem href="https://localhost/api/catalogItem/04fc0041-8e40-4e37-b072-7dba3e1c6a30" id="04fc0041-8e40-4e37-b072-7dba3e1c6a30" name="Ubuntu-vm" type="application/vnd.vmware.vcloud.catalogItem+xml"/></CatalogItems><IsPublished>false</IsPublished><DateCreated>2017-03-17T03:17:11.293-07:00</DateCreated><VersionNumber>5</VersionNumber>\r
-</Catalog>"""\r
-\r
-catalog2_xml_response = """<?xml version="1.0" encoding="UTF-8"?>\r
-<Catalog xmlns="http://www.vmware.com/vcloud/v1.5" name="cirros" id="urn:vcloud:catalog:32ccb082-4a65-41f6-bcd6-38942e8a3829" href="https://localhost/api/catalog/32ccb082-4a65-41f6-bcd6-38942e8a3829" type="application/vnd.vmware.vcloud.catalog+xml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">\r
-<Link rel="up" href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac" type="application/vnd.vmware.vcloud.org+xml"/>\r
-<Link rel="down" href="https://localhost/api/catalog/32ccb082-4a65-41f6-bcd6-38942e8a3829/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>\r
-<Link rel="add" href="https://localhost/api/catalog/32ccb082-4a65-41f6-bcd6-38942e8a3829/catalogItems" type="application/vnd.vmware.vcloud.catalogItem+xml"/>\r
-<Link rel="add" href="https://localhost/api/catalog/32ccb082-4a65-41f6-bcd6-38942e8a3829/action/upload" type="application/vnd.vmware.vcloud.media+xml"/>\r
-<Link rel="add" href="https://localhost/api/catalog/32ccb082-4a65-41f6-bcd6-38942e8a3829/action/upload" type="application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml"/>\r
-<Link rel="copy" href="https://localhost/api/catalog/32ccb082-4a65-41f6-bcd6-38942e8a3829/action/copy" type="application/vnd.vmware.vcloud.copyOrMoveCatalogItemParams+xml"/>\r
-<Link rel="move" href="https://localhost/api/catalog/32ccb082-4a65-41f6-bcd6-38942e8a3829/action/move" type="application/vnd.vmware.vcloud.copyOrMoveCatalogItemParams+xml"/>\r
-<Link rel="add" href="https://localhost/api/catalog/32ccb082-4a65-41f6-bcd6-38942e8a3829/action/captureVApp" type="application/vnd.vmware.vcloud.captureVAppParams+xml"/>\r
-<Link rel="down" href="https://localhost/api/catalog/32ccb082-4a65-41f6-bcd6-38942e8a3829/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
-<Link rel="controlAccess" href="https://localhost/api/catalog/32ccb082-4a65-41f6-bcd6-38942e8a3829/action/controlAccess" type="application/vnd.vmware.vcloud.controlAccess+xml"/> <Description>cirros</Description>\r
-<CatalogItems><CatalogItem href="https://localhost/api/catalogItem/98316d41-e38c-40c2-ac28-5462e8aada8c" id="98316d41-e38c-40c2-ac28-5462e8aada8c" name="cirros" type="application/vnd.vmware.vcloud.catalogItem+xml"/></CatalogItems><IsPublished>false</IsPublished><DateCreated>2017-03-08T02:06:07.003-08:00</DateCreated><VersionNumber>5</VersionNumber>\r
-</Catalog>"""\r
-\r
-vapp_xml_response = """<?xml version="1.0" encoding="UTF-8"?>\r
-<VApp xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1" xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" xmlns:vmw="http://www.vmware.com/schema/ovf" xmlns:ovfenv="http://schemas.dmtf.org/ovf/environment/1" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" ovfDescriptorUploaded="true" deployed="true" status="4" name="Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa" id="urn:vcloud:vapp:4f6a9b49-e92d-4935-87a1-0e4dc9c3a069" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069" type="application/vnd.vmware.vcloud.vApp+xml" xsi:schemaLocation="http://schemas.dmtf.org/ovf/envelope/1 http://schemas.dmtf.org/ovf/envelope/1/dsp8023_1.1.0.xsd http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd http://www.vmware.com/schema/ovf http://www.vmware.com/schema/ovf http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2.22.0/CIM_ResourceAllocationSettingData.xsd http://schemas.dmtf.org/ovf/environment/1 http://schemas.dmtf.org/ovf/envelope/1/dsp8027_1.1.0.xsd http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2.22.0/CIM_VirtualSystemSettingData.xsd">\r
-<Link rel="power:powerOff" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/power/action/powerOff"/>\r
-<Link rel="power:reboot" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/power/action/reboot"/>\r
-<Link rel="power:reset" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/power/action/reset"/>\r
-<Link rel="power:shutdown" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/power/action/shutdown"/>\r
-<Link rel="power:suspend" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/power/action/suspend"/>\r
-<Link rel="deploy" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/action/deploy" type="application/vnd.vmware.vcloud.deployVAppParams+xml"/>\r
-<Link rel="undeploy" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/action/undeploy" type="application/vnd.vmware.vcloud.undeployVAppParams+xml"/>\r
-<Link rel="down" href="https://localhost/api/network/9489a59a-0339-4151-9667-f5b90296c36d" name="External-Network-1074" type="application/vnd.vmware.vcloud.vAppNetwork+xml"/>\r
-<Link rel="down" href="https://localhost/api/network/379f083b-4057-4724-a128-ed5bc6672591" name="testing_T6nODiW4-68f68d93-0350-4d86-b40b-6e74dedf994d" type="application/vnd.vmware.vcloud.vAppNetwork+xml"/>\r
-<Link rel="down" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
-<Link rel="controlAccess" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/action/controlAccess" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
-<Link rel="up" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69" type="application/vnd.vmware.vcloud.vdc+xml"/>\r
-<Link rel="edit" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069" type="application/vnd.vmware.vcloud.vApp+xml"/>\r
-<Link rel="down" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/owner" type="application/vnd.vmware.vcloud.owner+xml"/>\r
-<Link rel="down" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>\r
-<Link rel="ovf" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/ovf" type="text/xml"/>\r
-<Link rel="down" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/productSections/" type="application/vnd.vmware.vcloud.productSections+xml"/>\r
-<Link rel="snapshot:create" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/action/createSnapshot" type="application/vnd.vmware.vcloud.createSnapshotParams+xml"/>\r
-<LeaseSettingsSection href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/leaseSettingsSection/" type="application/vnd.vmware.vcloud.leaseSettingsSection+xml" ovf:required="false">\r
-<ovf:Info>Lease settings section</ovf:Info>\r
-<Link rel="edit" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/leaseSettingsSection/" type="application/vnd.vmware.vcloud.leaseSettingsSection+xml"/> <DeploymentLeaseInSeconds>0</DeploymentLeaseInSeconds><StorageLeaseInSeconds>7776000</StorageLeaseInSeconds></LeaseSettingsSection>\r
-<ovf:StartupSection xmlns:vcloud="http://www.vmware.com/vcloud/v1.5" vcloud:type="application/vnd.vmware.vcloud.startupSection+xml" vcloud:href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/startupSection/"><ovf:Info>VApp startup section</ovf:Info>\r
-<ovf:Item ovf:id="Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa" ovf:order="0" ovf:startAction="powerOn" ovf:startDelay="0" ovf:stopAction="powerOff" ovf:stopDelay="0"/>\r
-<Link rel="edit" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/startupSection/" type="application/vnd.vmware.vcloud.startupSection+xml"/> </ovf:StartupSection><ovf:NetworkSection xmlns:vcloud="http://www.vmware.com/vcloud/v1.5" vcloud:type="application/vnd.vmware.vcloud.networkSection+xml" vcloud:href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/networkSection/"><ovf:Info>The list of logical networks</ovf:Info>\r
-<ovf:Network ovf:name="External-Network-1074"><ovf:Description>External-Network-1074</ovf:Description></ovf:Network>\r
-<ovf:Network ovf:name="testing_T6nODiW4-68f68d93-0350-4d86-b40b-6e74dedf994d"><ovf:Description/></ovf:Network></ovf:NetworkSection>\r
-<NetworkConfigSection href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/networkConfigSection/" type="application/vnd.vmware.vcloud.networkConfigSection+xml" ovf:required="false"><ovf:Info>The configuration parameters for logical networks</ovf:Info>\r
-<Link rel="edit" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/networkConfigSection/"   type="application/vnd.vmware.vcloud.networkConfigSection+xml"/><NetworkConfig networkName="External-Network-1074"><Link rel="repair" href="https://localhost/api/admin/network/9489a59a-0339-4151-9667-f5b90296c36d/action/reset"/>\r
-<Description>External-Network-1074</Description><Configuration><IpScopes><IpScope><IsInherited>false</IsInherited><Gateway>192.168.254.1</Gateway><Netmask>255.255.255.0</Netmask>\r
-<IsEnabled>true</IsEnabled><IpRanges><IpRange><StartAddress>192.168.254.100</StartAddress><EndAddress>192.168.254.199</EndAddress></IpRange></IpRanges></IpScope></IpScopes>\r
-<FenceMode>isolated</FenceMode><RetainNetInfoAcrossDeployments>false</RetainNetInfoAcrossDeployments></Configuration><IsDeployed>true</IsDeployed></NetworkConfig>\r
-<NetworkConfig networkName="testing_T6nODiW4-68f68d93-0350-4d86-b40b-6e74dedf994d">\r
-<Link rel="repair" href="https://localhost/api/admin/network/379f083b-4057-4724-a128-ed5bc6672591/action/reset"/><Description/><Configuration><IpScopes><IpScope><IsInherited>true</IsInherited>\r
-<Gateway>192.169.241.253</Gateway><Netmask>255.255.255.0</Netmask><Dns1>192.169.241.102</Dns1><DnsSuffix>corp.local</DnsSuffix><IsEnabled>true</IsEnabled><IpRanges><IpRange>\r
-<StartAddress>192.169.241.115</StartAddress><EndAddress>192.169.241.150</EndAddress></IpRange></IpRanges></IpScope></IpScopes>\r
-<ParentNetwork href="https://localhost/api/admin/network/d4307ff7-0e34-4d41-aab0-4c231a045088" id="d4307ff7-0e34-4d41-aab0-4c231a045088" name="testing_T6nODiW4-68f68d93-0350-4d86-b40b-6e74dedf994d"/><FenceMode>bridged</FenceMode><RetainNetInfoAcrossDeployments>false</RetainNetInfoAcrossDeployments></Configuration>\r
-<IsDeployed>true</IsDeployed></NetworkConfig></NetworkConfigSection><SnapshotSection href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/snapshotSection" type="application/vnd.vmware.vcloud.snapshotSection+xml" ovf:required="false"><ovf:Info>Snapshot information section</ovf:Info></SnapshotSection><DateCreated>2017-09-21T01:15:31.627-07:00</DateCreated><Owner type="application/vnd.vmware.vcloud.owner+xml">\r
-<User href="https://localhost/api/admin/user/f7b6beba-96db-4674-b187-675ed1873c8c" name="orgadmin" type="application/vnd.vmware.admin.user+xml"/>\r
-</Owner><InMaintenanceMode>false</InMaintenanceMode><Children>\r
-<Vm needsCustomization="false" nestedHypervisorEnabled="false" deployed="true" status="4" name="Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa" id="urn:vcloud:vm:47d12505-5968-4e16-95a7-18743edb0c8b" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b" type="application/vnd.vmware.vcloud.vm+xml">\r
-<Link rel="power:powerOff" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/power/action/powerOff"/>\r
-<Link rel="power:reboot" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/power/action/reboot"/>\r
-<Link rel="power:reset" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/power/action/reset"/>\r
-<Link rel="power:shutdown" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/power/action/shutdown"/>\r
-<Link rel="power:suspend" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/power/action/suspend"/>\r
-<Link rel="undeploy" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/action/undeploy" type="application/vnd.vmware.vcloud.undeployVAppParams+xml"/>\r
-<Link rel="edit" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b" type="application/vnd.vmware.vcloud.vm+xml"/>\r
-<Link rel="down" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>\r
-<Link rel="down" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/productSections/" type="application/vnd.vmware.vcloud.productSections+xml"/>\r
-<Link rel="down" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/metrics/current" type="application/vnd.vmware.vcloud.metrics.currentUsageSpec+xml"/>\r
-<Link rel="down" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/metrics/historic" type="application/vnd.vmware.vcloud.metrics.historicUsageSpec+xml"/>\r
-<Link rel="metrics" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/metrics/current" type="application/vnd.vmware.vcloud.metrics.currentUsageSpec+xml"/>\r
-<Link rel="metrics" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/metrics/historic" type="application/vnd.vmware.vcloud.metrics.historicUsageSpec+xml"/>\r
-<Link rel="screen:thumbnail" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/screen"/>\r
-<Link rel="screen:acquireTicket" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/screen/action/acquireTicket"/>\r
-<Link rel="screen:acquireMksTicket" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/screen/action/acquireMksTicket" type="application/vnd.vmware.vcloud.mksTicket+xml"/>\r
-<Link rel="media:insertMedia" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/media/action/insertMedia" type="application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml"/>\r
-<Link rel="media:ejectMedia" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/media/action/ejectMedia" type="application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml"/>\r
-<Link rel="disk:attach" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/disk/action/attach" type="application/vnd.vmware.vcloud.diskAttachOrDetachParams+xml"/>\r
-<Link rel="disk:detach" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/disk/action/detach" type="application/vnd.vmware.vcloud.diskAttachOrDetachParams+xml"/>\r
-<Link rel="installVmwareTools" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/action/installVMwareTools"/>\r
-<Link rel="customizeAtNextPowerOn" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/action/customizeAtNextPowerOn"/>\r
-<Link rel="snapshot:create" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/action/createSnapshot" type="application/vnd.vmware.vcloud.createSnapshotParams+xml"/>\r
-<Link rel="reconfigureVm" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/action/reconfigureVm" name="Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa" type="application/vnd.vmware.vcloud.vm+xml"/>\r
-<Link rel="up" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069" type="application/vnd.vmware.vcloud.vApp+xml"/><Description>Ubuntu-vm</Description>  <ovf:VirtualHardwareSection xmlns:vcloud="http://www.vmware.com/vcloud/v1.5" ovf:transport="" vcloud:type="application/vnd.vmware.vcloud.virtualHardwareSection+xml" vcloud:href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/"><ovf:Info>Virtual hardware requirements</ovf:Info><ovf:System><vssd:ElementName>Virtual Hardware Family</vssd:ElementName><vssd:InstanceID>0</vssd:InstanceID>    <vssd:VirtualSystemIdentifier>Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa</vssd:VirtualSystemIdentifier><vssd:VirtualSystemType>vmx-11</vssd:VirtualSystemType></ovf:System><ovf:Item>    <rasd:Address>00:50:56:01:12:a2</rasd:Address><rasd:AddressOnParent>0</rasd:AddressOnParent>    <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>    <rasd:Connection vcloud:ipAddressingMode="DHCP" vcloud:ipAddress="12.19.21.20" vcloud:primaryNetworkConnection="true">testing_T6nODiW4-68f68d93-0350-4d86-b40b-6e74dedf994d</rasd:Connection>    <rasd:Description>Vmxnet3 ethernet adapter on "testing_T6nODiW4-68f68d93-0350-4d86-b40b-6e74dedf994d"</rasd:Description>    <rasd:ElementName>Network adapter 0</rasd:ElementName>    <rasd:InstanceID>1</rasd:InstanceID>    <rasd:ResourceSubType>VMXNET3</rasd:ResourceSubType>    <rasd:ResourceType>10</rasd:ResourceType></ovf:Item><ovf:Item>    <rasd:Address>0</rasd:Address>    <rasd:Description>SCSI Controller</rasd:Description>    <rasd:ElementName>SCSI Controller 0</rasd:ElementName>    <rasd:InstanceID>2</rasd:InstanceID>    <rasd:ResourceSubType>lsilogic</rasd:ResourceSubType>    <rasd:ResourceType>6</rasd:ResourceType></ovf:Item><ovf:Item>    <rasd:AddressOnParent>0</rasd:AddressOnParent>    <rasd:Description>Hard disk</rasd:Description>    <rasd:ElementName>Hard disk 1</rasd:ElementName>    <rasd:HostResource 
vcloud:storageProfileHref="https://localhost/api/vdcStorageProfile/950701fb-2b8a-4808-80f1-27d1170a2bfc" vcloud:busType="6" vcloud:busSubType="lsilogic" vcloud:capacity="40960" vcloud:storageProfileOverrideVmDefault="false"/>    <rasd:InstanceID>2000</rasd:InstanceID>    <rasd:Parent>2</rasd:Parent>    <rasd:ResourceType>17</rasd:ResourceType>    <rasd:VirtualQuantity>42949672960</rasd:VirtualQuantity>    <rasd:VirtualQuantityUnits>byte</rasd:VirtualQuantityUnits></ovf:Item><ovf:Item>    <rasd:Address>0</rasd:Address>    <rasd:Description>SATA Controller</rasd:Description>    <rasd:ElementName>SATA Controller 0</rasd:ElementName>    <rasd:InstanceID>3</rasd:InstanceID>    <rasd:ResourceSubType>vmware.sata.ahci</rasd:ResourceSubType>    <rasd:ResourceType>20</rasd:ResourceType></ovf:Item><ovf:Item>    <rasd:AddressOnParent>0</rasd:AddressOnParent>    <rasd:AutomaticAllocation>false</rasd:AutomaticAllocation>    <rasd:Description>CD/DVD Drive</rasd:Description>    <rasd:ElementName>CD/DVD Drive 1</rasd:ElementName>    <rasd:HostResource/>    <rasd:InstanceID>16000</rasd:InstanceID>    <rasd:Parent>3</rasd:Parent>    <rasd:ResourceType>15</rasd:ResourceType></ovf:Item><ovf:Item>    <rasd:AddressOnParent>0</rasd:AddressOnParent>    <rasd:AutomaticAllocation>false</rasd:AutomaticAllocation>    <rasd:Description>Floppy Drive</rasd:Description>    <rasd:ElementName>Floppy Drive 1</rasd:ElementName>    <rasd:HostResource/>    <rasd:InstanceID>8000</rasd:InstanceID>    <rasd:ResourceType>14</rasd:ResourceType></ovf:Item><ovf:Item vcloud:type="application/vnd.vmware.vcloud.rasdItem+xml" vcloud:href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/cpu">    <rasd:AllocationUnits>hertz * 10^6</rasd:AllocationUnits>    <rasd:Description>Number of Virtual CPUs</rasd:Description>    <rasd:ElementName>1 virtual CPU(s)</rasd:ElementName>    <rasd:InstanceID>4</rasd:InstanceID>    <rasd:Reservation>0</rasd:Reservation>    
<rasd:ResourceType>3</rasd:ResourceType>    <rasd:VirtualQuantity>1</rasd:VirtualQuantity>    <rasd:Weight>0</rasd:Weight>    <vmw:CoresPerSocket ovf:required="false">1</vmw:CoresPerSocket>    <Link rel="edit" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/cpu" type="application/vnd.vmware.vcloud.rasdItem+xml"/></ovf:Item><ovf:Item vcloud:type="application/vnd.vmware.vcloud.rasdItem+xml" vcloud:href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/memory">    <rasd:AllocationUnits>byte * 2^20</rasd:AllocationUnits>    <rasd:Description>Memory Size</rasd:Description>    <rasd:ElementName>1024 MB of memory</rasd:ElementName>    <rasd:InstanceID>5</rasd:InstanceID>    <rasd:Reservation>0</rasd:Reservation>    <rasd:ResourceType>4</rasd:ResourceType>    <rasd:VirtualQuantity>1024</rasd:VirtualQuantity>    <rasd:Weight>0</rasd:Weight>    <Link rel="edit" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/memory" type="application/vnd.vmware.vcloud.rasdItem+xml"/></ovf:Item><Link rel="edit" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/" type="application/vnd.vmware.vcloud.virtualHardwareSection+xml"/><Link rel="down" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/cpu" type="application/vnd.vmware.vcloud.rasdItem+xml"/>\r
-<Link rel="edit" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/cpu" type="application/vnd.vmware.vcloud.rasdItem+xml"/>\r
-<Link rel="down" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/memory" type="application/vnd.vmware.vcloud.rasdItem+xml"/>\r
-<Link rel="edit" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/memory" type="application/vnd.vmware.vcloud.rasdItem+xml"/>\r
-<Link rel="down" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/disks" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/>\r
-<Link rel="edit" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/disks" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/><Link rel="down" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/media" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/><Link rel="down" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/networkCards" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/><Link rel="edit" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/networkCards" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/><Link rel="down" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/serialPorts" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/><Link rel="edit" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/serialPorts" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/></ovf:VirtualHardwareSection><ovf:OperatingSystemSection xmlns:vcloud="http://www.vmware.com/vcloud/v1.5" ovf:id="94" vcloud:type="application/vnd.vmware.vcloud.operatingSystemSection+xml" vmw:osType="ubuntu64Guest" vcloud:href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/operatingSystemSection/"><ovf:Info>Specifies the operating system installed</ovf:Info><ovf:Description>Ubuntu Linux (64-bit)</ovf:Description><Link rel="edit" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/operatingSystemSection/" type="application/vnd.vmware.vcloud.operatingSystemSection+xml"/></ovf:OperatingSystemSection><NetworkConnectionSection href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/networkConnectionSection/" type="application/vnd.vmware.vcloud.networkConnectionSection+xml" ovf:required="false"><ovf:Info>Specifies the available VM network 
connections</ovf:Info><PrimaryNetworkConnectionIndex>0</PrimaryNetworkConnectionIndex><NetworkConnection needsCustomization="false" network="testing_T6nODiW4-68f68d93-0350-4d86-b40b-6e74dedf994d">    <NetworkConnectionIndex>0</NetworkConnectionIndex>    <IpAddress>12.19.21.20</IpAddress>    <IsConnected>true</IsConnected>    <MACAddress>00:50:56:01:12:a2</MACAddress>    <IpAddressAllocationMode>DHCP</IpAddressAllocationMode></NetworkConnection><Link rel="edit" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/networkConnectionSection/" type="application/vnd.vmware.vcloud.networkConnectionSection+xml"/></NetworkConnectionSection><GuestCustomizationSection href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/guestCustomizationSection/" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml" ovf:required="false"><ovf:Info>Specifies Guest OS Customization Settings</ovf:Info><Enabled>true</Enabled><ChangeSid>false</ChangeSid><VirtualMachineId>47d12505-5968-4e16-95a7-18743edb0c8b</VirtualMachineId><JoinDomainEnabled>false</JoinDomainEnabled><UseOrgSettings>false</UseOrgSettings><AdminPasswordEnabled>false</AdminPasswordEnabled><AdminPasswordAuto>true</AdminPasswordAuto><AdminAutoLogonEnabled>false</AdminAutoLogonEnabled><AdminAutoLogonCount>0</AdminAutoLogonCount><ResetPasswordRequired>false</ResetPasswordRequired><ComputerName>Ubuntu-vm-001</ComputerName><Link rel="edit" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/guestCustomizationSection/" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml"/></GuestCustomizationSection><RuntimeInfoSection xmlns:vcloud="http://www.vmware.com/vcloud/v1.5" vcloud:type="application/vnd.vmware.vcloud.virtualHardwareSection+xml" vcloud:href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/runtimeInfoSection"><ovf:Info>Specifies Runtime info</ovf:Info><VMWareTools version="2147483647"/></RuntimeInfoSection><SnapshotSection 
href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/snapshotSection" type="application/vnd.vmware.vcloud.snapshotSection+xml" ovf:required="false"><ovf:Info>Snapshot information section</ovf:Info></SnapshotSection><DateCreated>2017-09-21T01:15:53.863-07:00</DateCreated><VAppScopedLocalId>Ubuntu-vm</VAppScopedLocalId><ovfenv:Environment xmlns:ns11="http://www.vmware.com/schema/ovfenv" ovfenv:id="" ns11:vCenterId="vm-7833"><ovfenv:PlatformSection>    <ovfenv:Kind>VMware ESXi</ovfenv:Kind>    <ovfenv:Version>6.0.0</ovfenv:Version>    <ovfenv:Vendor>VMware, Inc.</ovfenv:Vendor>    <ovfenv:Locale>en</ovfenv:Locale></ovfenv:PlatformSection><ovfenv:PropertySection>    <ovfenv:Property ovfenv:key="vCloud_UseSysPrep" ovfenv:value="None"/>    <ovfenv:Property ovfenv:key="vCloud_bitMask" ovfenv:value="1"/>    <ovfenv:Property ovfenv:key="vCloud_bootproto_0" ovfenv:value="dhcp"/>    <ovfenv:Property ovfenv:key="vCloud_computerName" ovfenv:value="Ubuntu-vm-001"/>    <ovfenv:Property ovfenv:key="vCloud_macaddr_0" ovfenv:value="00:50:56:01:12:a2"/>    <ovfenv:Property ovfenv:key="vCloud_markerid" ovfenv:value="c743cbe8-136e-4cf8-9e42-b291646b8058"/>    <ovfenv:Property ovfenv:key="vCloud_numnics" ovfenv:value="1"/>    <ovfenv:Property ovfenv:key="vCloud_primaryNic" ovfenv:value="0"/>    <ovfenv:Property ovfenv:key="vCloud_reconfigToken" ovfenv:value="246124151"/>    <ovfenv:Property ovfenv:key="vCloud_resetPassword" ovfenv:value="0"/></ovfenv:PropertySection><ve:EthernetAdapterSection xmlns:ve="http://www.vmware.com/schema/ovfenv" xmlns="http://schemas.dmtf.org/ovf/environment/1" xmlns:oe="http://schemas.dmtf.org/ovf/environment/1">    <ve:Adapter ve:mac="00:50:56:01:12:a2" ve:network="DPG-MGMT-3151" ve:unitNumber="7"/></ve:EthernetAdapterSection></ovfenv:Environment><VmCapabilities href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/vmCapabilities/" type="application/vnd.vmware.vcloud.vmCapabilitiesSection+xml"><Link rel="edit" 
href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/vmCapabilities/" type="application/vnd.vmware.vcloud.vmCapabilitiesSection+xml"/><MemoryHotAddEnabled>false</MemoryHotAddEnabled><CpuHotAddEnabled>false</CpuHotAddEnabled></VmCapabilities><StorageProfile href="https://localhost/api/vdcStorageProfile/950701fb-2b8a-4808-80f1-27d1170a2bfc" name="*" type="application/vnd.vmware.vcloud.vdcStorageProfile+xml"/></Vm></Children></VApp>"""\r
-\r
-poweroff_task_xml = """<?xml version="1.0" encoding="UTF-8"?>\r
-                <Task xmlns="http://www.vmware.com/vcloud/v1.5" cancelRequested="false" expiryTime="2017-12-22T23:18:23.040-08:00" operation="Powering Off Virtual Application Test1_vm-f370dafc-4aad-4415-bad9-68509dda67c9(f26ebf0a-f675-4622-83a6-64c6401769ac)" operationName="vappPowerOff" serviceNamespace="com.vmware.vcloud" startTime="2017-09-23T23:18:23.040-07:00" status="queued" name="task" id="urn:vcloud:task:26975b6e-310e-4ed9-914e-ba7051eaabcb" href="https://localhost/api/task/26975b6e-310e-4ed9-914e-ba7051eaabcb" type="application/vnd.vmware.vcloud.task+xml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">\r
-               <Owner href="https://localhost/api/vApp/vapp-f26ebf0a-f675-4622-83a6-64c6401769ac" name="Test1_vm-f370dafc-4aad-4415-bad9-68509dda67c9" type="application/vnd.vmware.vcloud.vApp+xml"/>\r
-               <User href="https://localhost/api/admin/user/f7b6beba-96db-4674-b187-675ed1873c8c" name="orgadmin" type="application/vnd.vmware.admin.user+xml"/>\r
-               <Organization href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac" name="Org3" type="application/vnd.vmware.vcloud.org+xml"/>\r
-               <Details/>\r
-               </Task>"""\r
-\r
-org_xml_response = """<Org xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" name="Org3" id="urn:vcloud:org:2cb3dffb-5c51-4355-8406-28553ead28ac" href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac" type="application/vnd.vmware.vcloud.org+xml" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">\r
-<Link rel="down" href="https://localhost/api/vdc/216648ae-1b91-412b-b821-e4c301ff27d2" name="osm" type="application/vnd.vmware.vcloud.vdc+xml"/>\r
-<Link rel="down" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69" name="Org3-VDC-PVDC1" type="application/vnd.vmware.vcloud.vdc+xml"/>\r
-<Link rel="down" href="https://localhost/api/vdc/414fdda9-3556-478c-a496-2deeec39cd30" name="osm1" type="application/vnd.vmware.vcloud.vdc+xml"/>\r
-<Link rel="down" href="https://localhost/api/tasksList/2cb3dffb-5c51-4355-8406-28553ead28ac" type="application/vnd.vmware.vcloud.tasksList+xml"/>\r
-<Link rel="down" href="https://localhost/api/catalog/4b94b67e-c2c6-49ec-b46c-3f35ba45ca4a" name="cirros034" type="application/vnd.vmware.vcloud.catalog+xml"/>\r
-<Link rel="down" href="https://localhost/api/catalog/4b94b67e-c2c6-49ec-b46c-3f35ba45ca4a/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
-<Link rel="controlAccess" href="https://localhost/api/catalog/4b94b67e-c2c6-49ec-b46c-3f35ba45ca4a/action/controlAccess" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
-<Link rel="down" href="https://localhost/api/catalog/34925a30-0f4a-4018-9759-0d6799063b51" name="Ubuntu_1nic" type="application/vnd.vmware.vcloud.catalog+xml"/>\r
-<Link rel="down" href="https://localhost/api/catalog/34925a30-0f4a-4018-9759-0d6799063b51/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
-<Link rel="controlAccess" href="https://localhost/api/catalog/34925a30-0f4a-4018-9759-0d6799063b51/action/controlAccess" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
-<Link rel="down" href="https://localhost/api/catalog/d3fa3df2-b311-4571-9138-4c66541d7f46" name="cirros_10" type="application/vnd.vmware.vcloud.catalog+xml"/>\r
-<Link rel="down" href="https://localhost/api/catalog/d3fa3df2-b311-4571-9138-4c66541d7f46/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
-<Link rel="controlAccess" href="https://localhost/api/catalog/d3fa3df2-b311-4571-9138-4c66541d7f46/action/controlAccess" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
-<Link rel="down" href="https://localhost/api/catalog/d0eb0b02-718d-42e0-b889-56575000b52d" name="Test_Cirros" type="application/vnd.vmware.vcloud.catalog+xml"/>\r
-<Link rel="down" href="https://localhost/api/catalog/d0eb0b02-718d-42e0-b889-56575000b52d/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
-<Link rel="controlAccess" href="https://localhost/api/catalog/d0eb0b02-718d-42e0-b889-56575000b52d/action/controlAccess" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
-<Link rel="down" href="https://localhost/api/catalog/c3b56180-f980-4256-9109-a93168d73ff2" name="de4ffcf2ad21f1a5d0714d6b868e2645" type="application/vnd.vmware.vcloud.catalog+xml"/>\r
-<Link rel="down" href="https://localhost/api/catalog/c3b56180-f980-4256-9109-a93168d73ff2/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
-<Link rel="controlAccess" href="https://localhost/api/catalog/c3b56180-f980-4256-9109-a93168d73ff2/action/controlAccess" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
-<Link rel="down" href="https://localhost/api/catalog/b139ed82-7ca4-49fb-9882-5f841f59c890" name="Ubuntu_plugtest-1" type="application/vnd.vmware.vcloud.catalog+xml"/>\r
-<Link rel="down" href="https://localhost/api/catalog/b139ed82-7ca4-49fb-9882-5f841f59c890/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
-<Link rel="controlAccess" href="https://localhost/api/catalog/b139ed82-7ca4-49fb-9882-5f841f59c890/action/controlAccess" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
-<Link rel="down" href="https://localhost/api/catalog/e8d953db-8dc9-46d5-9cab-329774cd2ad9" name="Ubuntu_no_nic" type="application/vnd.vmware.vcloud.catalog+xml"/>\r
-<Link rel="down" href="https://localhost/api/catalog/e8d953db-8dc9-46d5-9cab-329774cd2ad9/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
-<Link rel="controlAccess" href="https://localhost/api/catalog/e8d953db-8dc9-46d5-9cab-329774cd2ad9/action/controlAccess" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
-<Link rel="down" href="https://localhost/api/catalog/b31e6973-86d2-404b-a522-b16846d099dc" name="Ubuntu_Cat" type="application/vnd.vmware.vcloud.catalog+xml"/>\r
-<Link rel="down" href="https://localhost/api/catalog/b31e6973-86d2-404b-a522-b16846d099dc/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
-<Link rel="down" href="https://localhost/api/catalog/d64b2617-ea4b-4b90-910b-102c99dd2031" name="Ubuntu16" type="application/vnd.vmware.vcloud.catalog+xml"/>\r
-<Link rel="down" href="https://localhost/api/catalog/d64b2617-ea4b-4b90-910b-102c99dd2031/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
-<Link rel="controlAccess" href="https://localhost/api/catalog/d64b2617-ea4b-4b90-910b-102c99dd2031/action/controlAccess" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
-<Link rel="add" href="https://localhost/api/admin/org/2cb3dffb-5c51-4355-8406-28553ead28ac/catalogs" type="application/vnd.vmware.admin.catalog+xml"/>\r
-<Link rel="down" href="https://localhost/api/network/090ffa68-9be6-4d74-af45-9a071544a633" name="default.cirros_ns.cirros_nsd_vld1-73a7d683-af17-49ff-95d3-72f8feb25537" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
-<Link rel="down" href="https://localhost/api/network/17f3a12f-16f8-44a1-99e9-9a0122a7ac41" name="default.ass.management-3979591d-ea4e-4254-b4c4-4052107e4aca" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
-<Link rel="down" href="https://localhost/api/network/326aee79-4f5c-439c-8ead-1bbfa42d2e51" name="default.Testvm11.management-fe46ba91-3b36-4964-9ad2-e91b475b3d23" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
-<Link rel="down" href="https://localhost/api/network/420c24c7-89e9-49e5-ba6d-d21bfb9af94b" name="cirros_nsd_vld1-ea8aec47-0a6c-4fdb-814f-7a743e31407a" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
-<Link rel="down" href="https://localhost/api/network/42cba4fd-7baa-4f53-bda0-b36dada672d0" name="default.cirros_ns.cirros_nsd_vld1-44dff01a-2bdb-4096-a916-7e9826bfa401" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
-<Link rel="down" href="https://localhost/api/network/4ae9fec5-7ed0-4d5e-b0f3-f5289bdf6471" name="default.cirros_ns.cirros_nsd_vld1-9f547589-37b7-4d7d-8890-8d3dd479ff5b" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
-<Link rel="down" href="https://localhost/api/network/4b2ecfa9-6a70-4fe4-9d79-b3f74df91e85" name="default.cirros_ns.cirros_nsd_vld1-43852bce-6109-4949-b63a-deec9d7daab2" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
-<Link rel="down" href="https://localhost/api/network/693f72af-ae42-42e5-956e-25723628bf26" name="default.cirros_ns.cirros_nsd_vld1-8cd70d26-ba81-4a04-aa82-67a994b3e21c" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
-<Link rel="down" href="https://localhost/api/network/6d9fbd4c-f0b9-4033-a13f-a7c8990b01de" name="default.vcd.management-f05b9ad3-7480-4ee6-ab8d-92b1f3c0b265" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
-<Link rel="down" href="https://localhost/api/network/6e3e9f57-cee4-433a-883b-0bbe9760e99d" name="default" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
-<Link rel="down" href="https://localhost/api/network/7787cdd7-9577-4966-ba72-8fbbff5d2553" name="default.cirros_ns.cirros_nsd_vld1-ab1f2288-ff59-488c-af02-c8d5e34e0847" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
-<Link rel="down" href="https://localhost/api/network/7fa723e3-cd6c-4680-9522-e644eb31a188" name="default.cirros_ns.cirros_nsd_vld1-285865bb-736c-4b3d-8618-d755928daf5c" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
-<Link rel="down" href="https://localhost/api/network/9030a222-4562-43a0-abc6-aa60c7c1aae0" name="default.cirros_ns.cirros_nsd_vld1-57248151-de72-4313-a84f-b090d8c3feb8" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
-<Link rel="down" href="https://localhost/api/network/a4bd508c-1325-41b0-8c25-61cb7b83cde7" name="default.cirros_ns.cirros_nsd_vld1-491dfb8d-6b4b-41ab-b3e8-a5148e110bba" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
-<Link rel="down" href="https://localhost/api/network/a719292f-0a7f-4e03-a346-183f23f3e60c" name="default.cirros_ns.cirros_nsd_vld1-7ba57204-eed1-4dc8-8698-60a71bbae715" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
-<Link rel="down" href="https://localhost/api/network/a73574ef-16d4-4357-adbf-a0997eb5eb75" name="default.cirros_ns.cirros_nsd_vld1-4430f367-3fc8-4367-9bf1-96dbc244abe6" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
-<Link rel="down" href="https://localhost/api/network/a94e9ba0-e959-47d6-87c0-70e8cb1b485a" name="default.cirros_ns.cirros_nsd_vld1-c56c51c5-e5a8-44fe-9d36-1f2cbd9a7137" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
-<Link rel="down" href="https://localhost/api/network/ab88a587-ff82-4fa7-8225-c0e3eddbf6e6" name="cirros_nsd_vld1-0ed4b7e9-dd56-4f8b-b92f-829b9de95f66" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
-<Link rel="down" href="https://localhost/api/network/b141d722-c96b-4ac5-90da-3d407d376431" name="cirros_nsd_vld1-ad2ebea3-7a0b-4995-91bb-c16bc6fd4b0e" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
-<Link rel="down" href="https://localhost/api/network/c4d61fd6-4d1e-446c-949f-9eb42e0ccc63" name="default.cirros_ns.cirros_nsd_vld1-021a0669-1833-4a0b-a782-30ceed2cca7a" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
-<Link rel="down" href="https://localhost/api/network/cd466f6f-fdc5-404a-9136-320aaa9e3c16" name="default.cirros_ns.cirros_nsd_vld1-22e6962e-6488-47ad-bfad-41bc599abfcd" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
-<Link rel="down" href="https://localhost/api/network/cde04227-8f87-4956-b1f1-9f1be1241b8b" name="default.cirros_ns.cirros_nsd_vld1-629da038-a216-48c5-9ae2-aa4d5dea057c" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
-<Link rel="down" href="https://localhost/api/network/e9812bec-ded8-423d-9807-354adc5720aa" name="default.cirros_ns.cirros_nsd_vld1-ba7fcc4f-fa76-49b1-8fa0-2b0791141fdd" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
-<Link rel="down" href="https://localhost/api/network/eec8ef17-e379-4e40-a743-4ecec6afe616" name="cirros_nsd_vld1-aa9832d6-7d7a-4ac9-be56-cd171063818b" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
-<Link rel="down" href="https://localhost/api/network/ef16d51c-3a54-4eea-bc15-9aa1e92b140f" name="default.cirros_ns.cirros_nsd_vld1-fe7170ad-0b0a-491d-b585-4de31e758ad7" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
-<Link rel="down" href="https://localhost/api/network/f1554f21-4a7b-40be-9a34-a1b640c13398" name="default.Test21.cirros_nsd_vld1-c8f2b860-6794-4c8e-9a5b-3f107f23bbc4" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
-<Link rel="down" href="https://localhost/api/network/fcbbf40a-6578-4054-b496-f10504b94b21" name="default.cirros_ns.cirros_nsd_vld1-a3021c0f-a0fe-413d-9067-cb9182e1f614" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
-<Link rel="down" href="https://localhost/api/network/1fd6421e-929a-4576-bc19-a0c48aea1969" name="default" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
-<Link rel="down" href="https://localhost/api/network/41b8a539-6927-4ec4-a411-aedae8129c45" name="test001.vld2-name-e34e32fd-6d3f-4d24-9d29-e8dab46e515a" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
-<Link rel="down" href="https://localhost/api/network/437258c7-a221-48cd-b889-d24b2fc15087" name="Mgmt-Network-3151" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
-<Link rel="down" href="https://localhost/api/network/bfd56159-9178-4021-a5d8-9ec050569b0c" name="test001.net_internal_name-34602686-3619-4356-98e9-27f6e13e84ad" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
-<Link rel="down" href="https://localhost/api/network/08a0276e-d0fb-4223-92ae-003857ccd38f" name="pytest-09/20/17-05:26:01-cirros_nsd.cirros_nsd_vld1-d6688412-e82a-4cf7-aa77-400beb70dbbf" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
-<Link rel="down" href="https://localhost/api/network/294f2cba-9a81-49c5-bb73-fdaa6644c6ec" name="pytest-09/20/17-03:47:31-cirros_nsd.cirros_nsd_vld1-bd7e8e04-d075-4851-b550-0cf9737c7c8d" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
-<Link rel="down" href="https://localhost/api/network/d660e25b-8049-4e8f-a4b8-6811465197d7" name="Ns1.mgmt-dee74b34-51a5-4caa-aafe-d0c896e53828" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>\r
-<Link rel="down" href="https://localhost/api/supportedSystemsInfo/" type="application/vnd.vmware.vcloud.supportedSystemsInfo+xml"/>\r
-<Link rel="down" href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>\r
-<Link rel="down" href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac/hybrid" type="application/vnd.vmware.vcloud.hybridOrg+xml"/>\r
-<Link rel="alternate" href="https://localhost/api/admin/org/2cb3dffb-5c51-4355-8406-28553ead28ac" type="application/vnd.vmware.admin.organization+xml"/>\r
-<Link rel="down" href="https://localhost/api/vdcTemplates" type="application/vnd.vmware.admin.vdcTemplates+xml"/>\r
-<Link rel="instantiate" href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac/action/instantiate" type="application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml"/>\r
-<Description/><FullName>Organization 3</FullName>\r
-</Org>\r
-"""\r
-\r
-delete_catalog_xml_response = """<?xml version="1.0" encoding="UTF-8"?>\n<Catalog xmlns="http://www.vmware.com/vcloud/v1.5" name="80d8488f67ba1de98b7f485fba6abbd2" id="urn:vcloud:catalog:f3bf3733-465b-419f-b675-52f91d18edbb" href="https://localhost/api/catalog/f3bf3733-465b-419f-b675-52f91d18edbb" type="application/vnd.vmware.vcloud.catalog+xml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">\r
-<Link rel="up" href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac" type="application/vnd.vmware.vcloud.org+xml"/>\r
-<Link rel="down" href="https://localhost/api/catalog/f3bf3733-465b-419f-b675-52f91d18edbb/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>\r
-<Link rel="add" href="https://localhost/api/catalog/f3bf3733-465b-419f-b675-52f91d18edbb/catalogItems" type="application/vnd.vmware.vcloud.catalogItem+xml"/>\r
-<Link rel="add" href="https://localhost/api/catalog/f3bf3733-465b-419f-b675-52f91d18edbb/action/upload" type="application/vnd.vmware.vcloud.media+xml"/>\r
-<Link rel="add" href="https://localhost/api/catalog/f3bf3733-465b-419f-b675-52f91d18edbb/action/upload" type="application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml"/>\r
-<Link rel="copy" href="https://localhost/api/catalog/f3bf3733-465b-419f-b675-52f91d18edbb/action/copy" type="application/vnd.vmware.vcloud.copyOrMoveCatalogItemParams+xml"/>\r
-<Link rel="move" href="https://localhost/api/catalog/f3bf3733-465b-419f-b675-52f91d18edbb/action/move" type="application/vnd.vmware.vcloud.copyOrMoveCatalogItemParams+xml"/>\r
-<Link rel="add" href="https://localhost/api/catalog/f3bf3733-465b-419f-b675-52f91d18edbb/action/captureVApp" type="application/vnd.vmware.vcloud.captureVAppParams+xml"/>\r
-<Link rel="down" href="https://localhost/api/catalog/f3bf3733-465b-419f-b675-52f91d18edbb/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
-<Link rel="controlAccess" href="https://localhost/api/catalog/f3bf3733-465b-419f-b675-52f91d18edbb/action/controlAccess" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\r
-<Description>80d8488f67ba1de98b7f485fba6abbd2</Description>\r
-<CatalogItems>\r
-    <CatalogItem href="https://localhost/api/catalogItem/8a984fdd-d2cb-4d58-a739-2ea12560aded" id="8a984fdd-d2cb-4d58-a739-2ea12560aded" name="80d8488f67ba1de98b7f485fba6abbd2" type="application/vnd.vmware.vcloud.catalogItem+xml"/>\r
-</CatalogItems>\r
-<IsPublished>\r
-    false\r
-</IsPublished>\r
-<DateCreated>2017-09-24T02:30:23.623-07:00</DateCreated>\r
-<VersionNumber>2</VersionNumber>\r
-</Catalog>"""\r
-\r
-delete_catalog_item_xml_response = """<?xml version="1.0" encoding="UTF-8"?>\r
-<CatalogItem xmlns="http://www.vmware.com/vcloud/v1.5" size="0" name="80d8488f67ba1de98b7f485fba6abbd2" id="urn:vcloud:catalogitem:8a984fdd-d2cb-4d58-a739-2ea12560aded" href="https://localhost/api/catalogItem/8a984fdd-d2cb-4d58-a739-2ea12560aded" type="application/vnd.vmware.vcloud.catalogItem+xml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">\r
-<Link rel="up" href="https://localhost/api/catalog/f3bf3733-465b-419f-b675-52f91d18edbb" type="application/vnd.vmware.vcloud.catalog+xml"/>\r
-<Link rel="down" href="https://localhost/api/catalogItem/8a984fdd-d2cb-4d58-a739-2ea12560aded/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>\r
-<Link rel="edit" href="https://localhost/api/catalogItem/8a984fdd-d2cb-4d58-a739-2ea12560aded" type="application/vnd.vmware.vcloud.catalogItem+xml"/>\r
-<Link rel="remove" href="https://localhost/api/catalogItem/8a984fdd-d2cb-4d58-a739-2ea12560aded"/>    <Description>medial_file_name vApp Template</Description>\r
-<Entity href="https://localhost/api/vAppTemplate/vappTemplate-2731194b-637a-45f5-8e6d-dc65690302f7" name="80d8488f67ba1de98b7f485fba6abbd2" type="application/vnd.vmware.vcloud.vAppTemplate+xml"/>   <DateCreated>2017-09-24T02:30:26.380-07:00</DateCreated>\r
-<VersionNumber>1</VersionNumber>\r
-</CatalogItem>"""\r
-\r
-undeploy_task_xml = """<Task xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" cancelRequested="false" expiryTime="2018-07-17T23:53:10.781-07:00" operation="Stopping Virtual Application Test1_vm-5e6dbb30-41ea-4290-951d-6ce2a7412d46(86d9dd50-9c07-4fc5-84ce-aefb5de7b8ed)" operationName="vappUndeployPowerOff" serviceNamespace="com.vmware.vcloud" startTime="2018-04-18T23:53:10.781-07:00" status="queued" name="task" id="urn:vcloud:task:5ca0a79f-c025-47b9-9f20-b6a04fd67ea3" href="https://localhost/api/task/5ca0a79f-c025-47b9-9f20-b6a04fd67ea3" type="application/vnd.vmware.vcloud.task+xml" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">\r
-<Owner href="https://localhost/api/vApp/vapp-86d9dd50-9c07-4fc5-84ce-aefb5de7b8ed" name="Test1_vm-5e6dbb30-41ea-4290-951d-6ce2a7412d46" type="application/vnd.vmware.vcloud.vApp+xml"/>\r
-<User href="https://localhost/api/admin/user/f7b6beba-96db-4674-b187-675ed1873c8c" name="orgadmin" type="application/vnd.vmware.admin.user+xml"/>\r
-<Organization href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac" name="Org3" type="application/vnd.vmware.vcloud.org+xml"/>\r
-<Details/>\r
-</Task>\r
-"""\r
-\r
-delete_task_xml = """<Task xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" cancelRequested="false" expiryTime="2018-07-17T23:54:11.696-07:00" operation="Deleting Virtual Application Test1_vm-5e6dbb30-41ea-4290-951d-6ce2a7412d46(86d9dd50-9c07-4fc5-84ce-aefb5de7b8ed)" operationName="vdcDeleteVapp" serviceNamespace="com.vmware.vcloud" startTime="2018-04-18T23:54:11.696-07:00" status="queued" name="task" id="urn:vcloud:task:f0399f4e-ddd5-4050-959f-5970ba0a63e6" href="https://localhost/api/task/f0399f4e-ddd5-4050-959f-5970ba0a63e6" type="application/vnd.vmware.vcloud.task+xml" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">\r
-<Owner href="https://localhost/api/vApp/vapp-86d9dd50-9c07-4fc5-84ce-aefb5de7b8ed" name="Test1_vm-5e6dbb30-41ea-4290-951d-6ce2a7412d46" type="application/vnd.vmware.vcloud.vApp+xml"/>\r
-<User href="https://localhost/api/admin/user/f7b6beba-96db-4674-b187-675ed1873c8c" name="orgadmin" type="application/vnd.vmware.admin.user+xml"/>\r
-<Organization href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac" name="Org3" type="application/vnd.vmware.vcloud.org+xml"/>\r
-<Details/>\r
-</Task>"""\r
-\r
-status_task_xml = """<Task xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" cancelRequested="false" endTime="2018-04-19T01:24:46.643-07:00" expiryTime="2018-07-18T01:24:39.363-07:00" operation="Powered Off Virtual Application Test1_vm-fa13aee3-fb79-456f-8ce9-17f029ec4324(e9765c7a-b0de-4663-9db9-028bf0031f4d)" operationName="vappPowerOff" serviceNamespace="com.vmware.vcloud" startTime="2018-04-19T01:24:39.363-07:00" status="success" name="task" id="urn:vcloud:task:17ebe394-b419-4612-ab55-cad3000d780a" href="https://localhost/api/task/17ebe394-b419-4612-ab55-cad3000d780a" type="application/vnd.vmware.vcloud.task+xml" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">\r
-<Owner href="https://localhost/api/vApp/vapp-e9765c7a-b0de-4663-9db9-028bf0031f4d" name="Test1_vm-fa13aee3-fb79-456f-8ce9-17f029ec4324" type="application/vnd.vmware.vcloud.vApp+xml"/>\r
-<User href="https://localhost/api/admin/user/f7b6beba-96db-4674-b187-675ed1873c8c" name="orgadmin" type="application/vnd.vmware.admin.user+xml"/>\r
-<Organization href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac" name="Org3" type="application/vnd.vmware.vcloud.org+xml"/>\r
-<Details/>\r
-</Task>\r
-"""\r
-\r
-vm_xml_response = """<?xml version="1.0" encoding="UTF-8"?>\r
-<Vm xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1" xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" xmlns:vmw="http://www.vmware.com/schema/ovf" xmlns:ovfenv="http://schemas.dmtf.org/ovf/environment/1" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" needsCustomization="false" nestedHypervisorEnabled="false" deployed="true" status="4" name="Ubuntu_no_nic" id="urn:vcloud:vm:53a529b2-10d8-4d56-a7ad-8182acdbe71c" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c" type="application/vnd.vmware.vcloud.vm+xml" xsi:schemaLocation="http://schemas.dmtf.org/ovf/envelope/1 http://schemas.dmtf.org/ovf/envelope/1/dsp8023_1.1.0.xsd http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd http://www.vmware.com/schema/ovf http://www.vmware.com/schema/ovf http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2.22.0/CIM_ResourceAllocationSettingData.xsd http://schemas.dmtf.org/ovf/environment/1 http://schemas.dmtf.org/ovf/envelope/1/dsp8027_1.1.0.xsd http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2.22.0/CIM_VirtualSystemSettingData.xsd">\r
-<Link rel="power:powerOff" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/power/action/powerOff"/>\r
-<Link rel="power:reboot" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/power/action/reboot"/>    <Link rel="power:reset" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/power/action/reset"/>\r
-<Link rel="power:shutdown" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/power/action/shutdown"/>\r
-<Link rel="power:suspend" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/power/action/suspend"/> <Link rel="undeploy" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/action/undeploy" type="application/vnd.vmware.vcloud.undeployVAppParams+xml"/>\r
-<Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c" type="application/vnd.vmware.vcloud.vm+xml"/>\r
-<Link rel="down" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>\r
-<Link rel="down" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/productSections/" type="application/vnd.vmware.vcloud.productSections+xml"/>\r
-<Link rel="down" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/metrics/current" type="application/vnd.vmware.vcloud.metrics.currentUsageSpec+xml"/>\r
-<Link rel="down" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/metrics/historic" type="application/vnd.vmware.vcloud.metrics.historicUsageSpec+xml"/>\r
-<Link rel="metrics" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/metrics/current" type="application/vnd.vmware.vcloud.metrics.currentUsageSpec+xml"/>\r
-<Link rel="metrics" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/metrics/historic" type="application/vnd.vmware.vcloud.metrics.historicUsageSpec+xml"/>\r
-<Link rel="screen:thumbnail" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/screen"/>\r
-<Link rel="screen:acquireTicket" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/screen/action/acquireTicket"/>\r
-<Link rel="screen:acquireMksTicket" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/screen/action/acquireMksTicket" type="application/vnd.vmware.vcloud.mksTicket+xml"/>\r
-<Link rel="media:insertMedia" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/media/action/insertMedia" type="application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml"/>\r
-<Link rel="media:ejectMedia" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/media/action/ejectMedia" type="application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml"/>\r
-<Link rel="disk:attach" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/disk/action/attach" type="application/vnd.vmware.vcloud.diskAttachOrDetachParams+xml"/>\r
-<Link rel="disk:detach" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/disk/action/detach" type="application/vnd.vmware.vcloud.diskAttachOrDetachParams+xml"/>\r
-<Link rel="installVmwareTools" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/action/installVMwareTools"/>\r
-<Link rel="customizeAtNextPowerOn" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/action/customizeAtNextPowerOn"/>\r
-<Link rel="snapshot:create" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/action/createSnapshot" type="application/vnd.vmware.vcloud.createSnapshotParams+xml"/>\r
-<Link rel="reconfigureVm" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/action/reconfigureVm" name="Ubuntu_no_nic" type="application/vnd.vmware.vcloud.vm+xml"/>\r
-<Link rel="up" href="https://localhost/api/vApp/vapp-5a5ca3da-3826-4fe4-83c5-c018ad1765fa" type="application/vnd.vmware.vcloud.vApp+xml"/>\r
-<Description/>\r
-<ovf:VirtualHardwareSection xmlns:vcloud="http://www.vmware.com/vcloud/v1.5" ovf:transport="" vcloud:type="application/vnd.vmware.vcloud.virtualHardwareSection+xml" vcloud:href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/">\r
-<ovf:Info>Virtual hardware requirements</ovf:Info>\r
-<ovf:System>\r
-<vssd:ElementName>Virtual Hardware Family</vssd:ElementName>\r
-<vssd:InstanceID>0</vssd:InstanceID>\r
-<vssd:VirtualSystemIdentifier>Ubuntu_no_nic</vssd:VirtualSystemIdentifier>\r
-<vssd:VirtualSystemType>vmx-11</vssd:VirtualSystemType>\r
-</ovf:System><ovf:Item>\r
-<rasd:Address>00:50:56:01:14:1a</rasd:Address>\r
-<rasd:AddressOnParent>0</rasd:AddressOnParent>\r
-<rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>\r
-<rasd:Connection vcloud:ipAddressingMode="DHCP" vcloud:ipAddress="172.16.27.72" vcloud:primaryNetworkConnection="true">testing_6SNBKa9pz62P-63e13553-ebf9-4518-a33d-6ea922a6d2ce</rasd:Connection>\r
-<rasd:Description>Vmxnet3 ethernet adapter on "testing_6SNBKa9pz62P-63e13553-ebf9-4518-a33d-6ea922a6d2ce"\r
-</rasd:Description><rasd:ElementName>Network adapter 0</rasd:ElementName>\r
-<rasd:InstanceID>1</rasd:InstanceID>\r
-<rasd:ResourceSubType>VMXNET3</rasd:ResourceSubType>\r
-<rasd:ResourceType>10</rasd:ResourceType></ovf:Item><ovf:Item>\r
-<rasd:Address>0</rasd:Address><rasd:Description>SCSI Controller</rasd:Description>\r
-<rasd:ElementName>SCSI Controller 0</rasd:ElementName>\r
-<rasd:InstanceID>2</rasd:InstanceID>\r
-<rasd:ResourceSubType>lsilogic</rasd:ResourceSubType>\r
-<rasd:ResourceType>6</rasd:ResourceType></ovf:Item><ovf:Item>\r
-<rasd:AddressOnParent>0</rasd:AddressOnParent>\r
-<rasd:Description>Hard disk</rasd:Description>\r
-<rasd:ElementName>Hard disk 1</rasd:ElementName>\r
-<rasd:HostResource vcloud:storageProfileHref="https://localhost/api/vdcStorageProfile/950701fb-2b8a-4808-80f1-27d1170a2bfc" vcloud:busType="6" vcloud:busSubType="lsilogic" vcloud:capacity="10240" vcloud:storageProfileOverrideVmDefault="false"/>      <rasd:InstanceID>2000</rasd:InstanceID>\r
-<rasd:Parent>2</rasd:Parent><rasd:ResourceType>17</rasd:ResourceType>\r
-<rasd:VirtualQuantity>10737418240</rasd:VirtualQuantity>\r
-<rasd:VirtualQuantityUnits>byte</rasd:VirtualQuantityUnits>\r
-</ovf:Item><ovf:Item><rasd:Address>1</rasd:Address>\r
-<rasd:Description>IDE Controller</rasd:Description>\r
-<rasd:ElementName>IDE Controller 1</rasd:ElementName>\r
-<rasd:InstanceID>3</rasd:InstanceID>\r
-<rasd:ResourceType>5</rasd:ResourceType>\r
-</ovf:Item><ovf:Item><rasd:AddressOnParent>0</rasd:AddressOnParent>\r
-<rasd:AutomaticAllocation>false</rasd:AutomaticAllocation>\r
-<rasd:Description>CD/DVD Drive</rasd:Description>\r
-<rasd:ElementName>CD/DVD Drive 1</rasd:ElementName><rasd:HostResource/>\r
-<rasd:InstanceID>3002</rasd:InstanceID>\r
-<rasd:Parent>3</rasd:Parent>\r
-<rasd:ResourceType>15</rasd:ResourceType></ovf:Item><ovf:Item>\r
-<rasd:AddressOnParent>0</rasd:AddressOnParent>\r
-<rasd:AutomaticAllocation>false</rasd:AutomaticAllocation>\r
-<rasd:Description>Floppy Drive</rasd:Description>\r
-<rasd:ElementName>Floppy Drive 1</rasd:ElementName>\r
-<rasd:HostResource/><rasd:InstanceID>8000</rasd:InstanceID>\r
-<rasd:ResourceType>14</rasd:ResourceType>\r
-</ovf:Item>\r
-<ovf:Item vcloud:type="application/vnd.vmware.vcloud.rasdItem+xml" vcloud:href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/cpu">\r
-<rasd:AllocationUnits>hertz * 10^6</rasd:AllocationUnits>\r
-<rasd:Description>Number of Virtual CPUs</rasd:Description>\r
-<rasd:ElementName>1 virtual CPU(s)</rasd:ElementName>\r
-<rasd:InstanceID>4</rasd:InstanceID>\r
-<rasd:Reservation>0</rasd:Reservation>\r
-<rasd:ResourceType>3</rasd:ResourceType>\r
-<rasd:VirtualQuantity>1</rasd:VirtualQuantity>\r
-<rasd:Weight>0</rasd:Weight>\r
-<vmw:CoresPerSocket ovf:required="false">1</vmw:CoresPerSocket>\r
-<Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/cpu" type="application/vnd.vmware.vcloud.rasdItem+xml"/></ovf:Item>\r
-<ovf:Item vcloud:type="application/vnd.vmware.vcloud.rasdItem+xml" vcloud:href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/memory">\r
-<rasd:AllocationUnits>byte * 2^20</rasd:AllocationUnits>\r
-<rasd:Description>Memory Size</rasd:Description>\r
-<rasd:ElementName>1024 MB of memory</rasd:ElementName>\r
-<rasd:InstanceID>5</rasd:InstanceID>\r
-<rasd:Reservation>0</rasd:Reservation>\r
-<rasd:ResourceType>4</rasd:ResourceType>\r
-<rasd:VirtualQuantity>1024</rasd:VirtualQuantity>\r
-<rasd:Weight>0</rasd:Weight>\r
-<Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/memory" type="application/vnd.vmware.vcloud.rasdItem+xml"/>\r
-        </ovf:Item>\r
-        <Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/" type="application/vnd.vmware.vcloud.virtualHardwareSection+xml"/>\r
-        <Link rel="down" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/cpu" type="application/vnd.vmware.vcloud.rasdItem+xml"/>\r
-        <Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/cpu" type="application/vnd.vmware.vcloud.rasdItem+xml"/>\r
-        <Link rel="down" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/memory" type="application/vnd.vmware.vcloud.rasdItem+xml"/>\r
-        <Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/memory" type="application/vnd.vmware.vcloud.rasdItem+xml"/>\r
-        <Link rel="down" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/disks" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/>\r
-        <Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/disks" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/>\r
-        <Link rel="down" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/media" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/>\r
-        <Link rel="down" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/networkCards" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/>\r
-        <Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/networkCards" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/>\r
-        <Link rel="down" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/serialPorts" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/>\r
-        <Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/serialPorts" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/>\r
-    </ovf:VirtualHardwareSection>\r
-    <ovf:OperatingSystemSection xmlns:vcloud="http://www.vmware.com/vcloud/v1.5" ovf:id="94" vcloud:type="application/vnd.vmware.vcloud.operatingSystemSection+xml" vmw:osType="ubuntu64Guest" vcloud:href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/operatingSystemSection/">\r
-        <ovf:Info>Specifies the operating system installed</ovf:Info>\r
-        <ovf:Description>Ubuntu Linux (64-bit)</ovf:Description>\r
-        <Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/operatingSystemSection/" type="application/vnd.vmware.vcloud.operatingSystemSection+xml"/>\r
-    </ovf:OperatingSystemSection>\r
-    <NetworkConnectionSection href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/networkConnectionSection/" type="application/vnd.vmware.vcloud.networkConnectionSection+xml" ovf:required="false">\r
-        <ovf:Info>Specifies the available VM network connections</ovf:Info>\r
-        <PrimaryNetworkConnectionIndex>0</PrimaryNetworkConnectionIndex>\r
-        <NetworkConnection needsCustomization="false" network="testing_6SNBKa9pz62P-63e13553-ebf9-4518-a33d-6ea922a6d2ce">\r
-            <NetworkConnectionIndex>0</NetworkConnectionIndex>\r
-            <IpAddress>172.16.27.72</IpAddress>\r
-            <IsConnected>true</IsConnected>\r
-            <MACAddress>00:50:56:01:14:1a</MACAddress>\r
-            <IpAddressAllocationMode>DHCP</IpAddressAllocationMode>\r
-            <NetworkAdapterType>VMXNET3</NetworkAdapterType>\r
-        </NetworkConnection>\r
-        <Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/networkConnectionSection/" type="application/vnd.vmware.vcloud.networkConnectionSection+xml"/>\r
-    </NetworkConnectionSection>  \r
-    <NetworkConnectionSection href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/networkConnectionSection/" type="application/vnd.vmware.vcloud.networkConnectionSection+xml" ovf:required="false">\r
-        <ovf:Info>Specifies the available VM network connections</ovf:Info>\r
-        <PrimaryNetworkConnectionIndex>0</PrimaryNetworkConnectionIndex>\r
-        <NetworkConnection needsCustomization="false" network="testing_6SNBKa9pz62P-63e13553-ebf9-4518-a33d-6ea922a6d2ce">\r
-            <NetworkConnectionIndex>0</NetworkConnectionIndex>\r
-            <IpAddress>172.16.27.72</IpAddress>\r
-            <IsConnected>true</IsConnected>\r
-            <MACAddress>00:50:56:01:14:1a</MACAddress>\r
-            <IpAddressAllocationMode>DHCP</IpAddressAllocationMode>\r
-        </NetworkConnection>\r
-        <Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/networkConnectionSection/" type="application/vnd.vmware.vcloud.networkConnectionSection+xml"/>\r
-    </NetworkConnectionSection>\r
-    <GuestCustomizationSection href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/guestCustomizationSection/" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml" ovf:required="false">\r
-        <ovf:Info>Specifies Guest OS Customization Settings</ovf:Info>\r
-        <Enabled>true</Enabled>\r
-        <ChangeSid>false</ChangeSid>\r
-        <VirtualMachineId>53a529b2-10d8-4d56-a7ad-8182acdbe71c</VirtualMachineId>\r
-        <JoinDomainEnabled>false</JoinDomainEnabled>\r
-        <UseOrgSettings>false</UseOrgSettings>\r
-        <AdminPasswordEnabled>false</AdminPasswordEnabled>\r
-        <AdminPasswordAuto>true</AdminPasswordAuto>\r
-        <AdminAutoLogonEnabled>false</AdminAutoLogonEnabled>\r
-        <AdminAutoLogonCount>0</AdminAutoLogonCount>\r
-        <ResetPasswordRequired>false</ResetPasswordRequired>\r
-        <ComputerName>Ubuntunonic-001</ComputerName>\r
-        <Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/guestCustomizationSection/" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml"/>\r
-    </GuestCustomizationSection>\r
-    <RuntimeInfoSection xmlns:vcloud="http://www.vmware.com/vcloud/v1.5" vcloud:type="application/vnd.vmware.vcloud.virtualHardwareSection+xml" vcloud:href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/runtimeInfoSection">\r
-        <ovf:Info>Specifies Runtime info</ovf:Info>\r
-        <VMWareTools version="2147483647"/>\r
-    </RuntimeInfoSection>\r
-    <SnapshotSection href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/snapshotSection" type="application/vnd.vmware.vcloud.snapshotSection+xml" ovf:required="false">\r
-        <ovf:Info>Snapshot information section</ovf:Info>\r
-    </SnapshotSection>\r
-    <DateCreated>2018-04-19T04:19:28.150-07:00</DateCreated>\r
-    <VAppScopedLocalId>Ubuntu_no_nic</VAppScopedLocalId>\r
-    <ovfenv:Environment xmlns:ns11="http://www.vmware.com/schema/ovfenv" ovfenv:id="" ns11:vCenterId="vm-8971">\r
-        <ovfenv:PlatformSection>\r
-<ovfenv:Kind>VMware ESXi</ovfenv:Kind>\r
-<ovfenv:Version>6.0.0</ovfenv:Version>\r
-<ovfenv:Vendor>VMware, Inc.</ovfenv:Vendor>\r
-<ovfenv:Locale>en</ovfenv:Locale>\r
-        </ovfenv:PlatformSection>\r
-        <ovfenv:PropertySection>\r
-<ovfenv:Property ovfenv:key="vCloud_UseSysPrep" ovfenv:value="None"/>\r
-<ovfenv:Property ovfenv:key="vCloud_bitMask" ovfenv:value="1"/>\r
-<ovfenv:Property ovfenv:key="vCloud_bootproto_0" ovfenv:value="dhcp"/>\r
-<ovfenv:Property ovfenv:key="vCloud_computerName" ovfenv:value="Ubuntunonic-001"/>\r
-<ovfenv:Property ovfenv:key="vCloud_macaddr_0" ovfenv:value="00:50:56:01:14:1a"/>\r
-<ovfenv:Property ovfenv:key="vCloud_markerid" ovfenv:value="ec8b90ea-cb5d-43b4-8910-91380ff29d97"/>\r
-<ovfenv:Property ovfenv:key="vCloud_numnics" ovfenv:value="1"/>\r
-<ovfenv:Property ovfenv:key="vCloud_primaryNic" ovfenv:value="0"/>\r
-<ovfenv:Property ovfenv:key="vCloud_reconfigToken" ovfenv:value="132681259"/>\r
-<ovfenv:Property ovfenv:key="vCloud_resetPassword" ovfenv:value="0"/>\r
-        </ovfenv:PropertySection>\r
-        <ve:EthernetAdapterSection xmlns:ve="http://www.vmware.com/schema/ovfenv" xmlns="http://schemas.dmtf.org/ovf/environment/1" xmlns:oe="http://schemas.dmtf.org/ovf/environment/1">\r
-<ve:Adapter ve:mac="00:50:56:01:14:1a" ve:network="DPG-MGMT-3151" ve:unitNumber="7"/>\r
-   \r
-        </ve:EthernetAdapterSection>\r
-    </ovfenv:Environment>\r
-    <VmCapabilities href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/vmCapabilities/" type="application/vnd.vmware.vcloud.vmCapabilitiesSection+xml">\r
-        <Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/vmCapabilities/" type="application/vnd.vmware.vcloud.vmCapabilitiesSection+xml"/>\r
-        <MemoryHotAddEnabled>false</MemoryHotAddEnabled>\r
-        <CpuHotAddEnabled>false</CpuHotAddEnabled>\r
-    </VmCapabilities>\r
-    <StorageProfile href="https://localhost/api/vdcStorageProfile/950701fb-2b8a-4808-80f1-27d1170a2bfc" name="*" type="application/vnd.vmware.vcloud.vdcStorageProfile+xml"/>\r
-</Vm>"""\r
-\r
-delete_tenant = """<?xml version="1.0" encoding="UTF-8"?>\n<Vdc xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5" status="1" name="testing_Cqm5fiZ" id="urn:vcloud:vdc:753227f5-d6c6-4478-9546-acc5cfff21e9" href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9" type="application/vnd.vmware.vcloud.vdc+xml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd http://www.vmware.com/vcloud/extension/v1.5 http://localhost/api/v1.5/schema/vmwextensions.xsd">\n    <VCloudExtension required="false">\n        <vmext:VimObjectRef>\n            <vmext:VimServerRef href="https://localhost/api/admin/extension/vimServer/cc82baf9-9f80-4468-bfe9-ce42b3f9dde5" name="VC" type="application/vnd.vmware.admin.vmwvirtualcenter+xml"/>\n            <vmext:MoRef>resgroup-9025</vmext:MoRef>\n            <vmext:VimObjectType>RESOURCE_POOL</vmext:VimObjectType>\n        </vmext:VimObjectRef>\n    </VCloudExtension>\n    <Link rel="up" href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac" type="application/vnd.vmware.vcloud.org+xml"/>\n    <Link rel="down" href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>\n    <Link rel="edit" href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9" type="application/vnd.vmware.vcloud.vdc+xml"/>\n    <Link rel="remove" href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9"/>\n    <Link rel="add" href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/action/uploadVAppTemplate" type="application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml"/>\n    <Link rel="add" href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/media" type="application/vnd.vmware.vcloud.media+xml"/>\n    <Link rel="add" 
href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/action/instantiateOvf" type="application/vnd.vmware.vcloud.instantiateOvfParams+xml"/>\n    <Link rel="add" href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/action/instantiateVAppTemplate" type="application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml"/>\n    <Link rel="add" href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/action/cloneVApp" type="application/vnd.vmware.vcloud.cloneVAppParams+xml"/>\n    <Link rel="add" href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/action/cloneVAppTemplate" type="application/vnd.vmware.vcloud.cloneVAppTemplateParams+xml"/>\n    <Link rel="add" href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/action/cloneMedia" type="application/vnd.vmware.vcloud.cloneMediaParams+xml"/>\n    <Link rel="add" href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/action/captureVApp" type="application/vnd.vmware.vcloud.captureVAppParams+xml"/>\n    <Link rel="add" href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/action/composeVApp" type="application/vnd.vmware.vcloud.composeVAppParams+xml"/>\n    <Link rel="add" href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/disk" type="application/vnd.vmware.vcloud.diskCreateParams+xml"/>\n    <Link rel="edgeGateways" href="https://localhost/api/admin/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/edgeGateways" type="application/vnd.vmware.vcloud.query.records+xml"/>\n    <Link rel="add" href="https://localhost/api/admin/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/networks" type="application/vnd.vmware.vcloud.orgVdcNetwork+xml"/>\n    <Link rel="orgVdcNetworks" href="https://localhost/api/admin/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/networks" type="application/vnd.vmware.vcloud.query.records+xml"/>\n    <Link rel="alternate" href="https://localhost/api/admin/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9" 
type="application/vnd.vmware.admin.vdc+xml"/>\n    <Description>opnemano</Description>\n    <AllocationModel>AllocationVApp</AllocationModel>\n    <ComputeCapacity>\n        <Cpu>\n            <Units>MHz</Units>\n            <Allocated>0</Allocated>\n            <Limit>2048</Limit>\n            <Reserved>0</Reserved>\n            <Used>0</Used>\n            <Overhead>0</Overhead>\n        </Cpu>\n        <Memory>\n            <Units>MB</Units>\n            <Allocated>0</Allocated>\n            <Limit>2048</Limit>\n            <Reserved>0</Reserved>\n            <Used>0</Used>\n            <Overhead>0</Overhead>\n        </Memory>\n    </ComputeCapacity>\n    <ResourceEntities/>\n    <AvailableNetworks/>\n    <Capabilities>\n        <SupportedHardwareVersions>\n            <SupportedHardwareVersion>vmx-04</SupportedHardwareVersion>\n            <SupportedHardwareVersion>vmx-07</SupportedHardwareVersion>\n            <SupportedHardwareVersion>vmx-08</SupportedHardwareVersion>\n            <SupportedHardwareVersion>vmx-09</SupportedHardwareVersion>\n            <SupportedHardwareVersion>vmx-10</SupportedHardwareVersion>\n            <SupportedHardwareVersion>vmx-11</SupportedHardwareVersion>\n        </SupportedHardwareVersions>\n    </Capabilities>\n    <NicQuota>100</NicQuota>\n    <NetworkQuota>100</NetworkQuota>\n    <UsedNetworkCount>0</UsedNetworkCount>\n    <VmQuota>50</VmQuota>\n    <IsEnabled>true</IsEnabled>\n    <VdcStorageProfiles>\n        <VdcStorageProfile href="https://localhost/api/vdcStorageProfile/37ec8982-e6c3-4fba-a107-0fa36fe292d0" name="NFS Storage Policy" type="application/vnd.vmware.vcloud.vdcStorageProfile+xml"/>\n    </VdcStorageProfiles>\n    <VCpuInMhz2>1000</VCpuInMhz2>\n</Vdc>\n"""\r
-\r
-catalog_list_xml = """<CatalogItem xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" size="0" name="Ubuntu_no_nic" id="urn:vcloud:catalogitem:d79fb542-6ad4-4c09-8cfc-f6104cbf67ad" href="https://localhost/api/catalogItem/d79fb542-6ad4-4c09-8cfc-f6104cbf67ad" type="application/vnd.vmware.vcloud.catalogItem+xml" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd"><Link rel="up" href="https://localhost/api/catalog/e8d953db-8dc9-46d5-9cab-329774cd2ad9" type="application/vnd.vmware.vcloud.catalog+xml"/><Link rel="down" href="https://localhost/api/catalogItem/d79fb542-6ad4-4c09-8cfc-f6104cbf67ad/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/><Link rel="edit" href="https://localhost/api/catalogItem/d79fb542-6ad4-4c09-8cfc-f6104cbf67ad" type="application/vnd.vmware.vcloud.catalogItem+xml"/><Link rel="remove" href="https://localhost/api/catalogItem/d79fb542-6ad4-4c09-8cfc-f6104cbf67ad"/><Description/><Entity href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435" name="Ubuntu_no_nic" type="application/vnd.vmware.vcloud.vAppTemplate+xml"/><DateCreated>2017-10-14T23:52:58.097-07:00</DateCreated><VersionNumber>1</VersionNumber></CatalogItem>"""\r
-\r
-catalogItem_xml = """<?xml version="1.0" encoding="UTF-8"?>\n<CatalogItem xmlns="http://www.vmware.com/vcloud/v1.5" size="0" name="Ubuntu_no_nic" id="urn:vcloud:catalogitem:d79fb542-6ad4-4c09-8cfc-f6104cbf67ad" href="https://localhost/api/catalogItem/d79fb542-6ad4-4c09-8cfc-f6104cbf67ad" type="application/vnd.vmware.vcloud.catalogItem+xml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">\n    <Link rel="up" href="https://localhost/api/catalog/e8d953db-8dc9-46d5-9cab-329774cd2ad9" type="application/vnd.vmware.vcloud.catalog+xml"/>\n    <Link rel="down" href="https://localhost/api/catalogItem/d79fb542-6ad4-4c09-8cfc-f6104cbf67ad/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>\n    <Link rel="edit" href="https://localhost/api/catalogItem/d79fb542-6ad4-4c09-8cfc-f6104cbf67ad" type="application/vnd.vmware.vcloud.catalogItem+xml"/>\n    <Link rel="remove" href="https://localhost/api/catalogItem/d79fb542-6ad4-4c09-8cfc-f6104cbf67ad"/>\n    <Description/>\n    <Entity href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435" name="Ubuntu_no_nic" type="application/vnd.vmware.vcloud.vAppTemplate+xml"/>\n    <DateCreated>2017-10-14T23:52:58.097-07:00</DateCreated>\n    <VersionNumber>1</VersionNumber>\n</CatalogItem>"""\r
-\r
-vapp_template_xml = """<?xml version="1.0" encoding="UTF-8"?>\n<VAppTemplate xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1" xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" xmlns:vmw="http://www.vmware.com/schema/ovf" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" goldMaster="false" ovfDescriptorUploaded="true" status="8" name="Ubuntu_no_nic" id="urn:vcloud:vapptemplate:593e3130-ac0b-44f1-8289-14329dcc5435" href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435" type="application/vnd.vmware.vcloud.vAppTemplate+xml" xsi:schemaLocation="http://schemas.dmtf.org/ovf/envelope/1 http://schemas.dmtf.org/ovf/envelope/1/dsp8023_1.1.0.xsd http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd http://www.vmware.com/schema/ovf http://www.vmware.com/schema/ovf http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2.22.0/CIM_ResourceAllocationSettingData.xsd http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2.22.0/CIM_VirtualSystemSettingData.xsd">\n    <Link rel="up" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69" type="application/vnd.vmware.vcloud.vdc+xml"/>\n    <Link rel="catalogItem" href="https://localhost/api/catalogItem/d79fb542-6ad4-4c09-8cfc-f6104cbf67ad" type="application/vnd.vmware.vcloud.catalogItem+xml"/>\n    <Link rel="remove" href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435"/>\n    <Link rel="edit" href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435" type="application/vnd.vmware.vcloud.vAppTemplate+xml"/>\n    <Link rel="enable" 
href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435/action/enableDownload"/>\n    <Link rel="disable" href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435/action/disableDownload"/>\n    <Link rel="ovf" href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435/ovf" type="text/xml"/>\n    <Link rel="storageProfile" href="https://localhost/api/vdcStorageProfile/950701fb-2b8a-4808-80f1-27d1170a2bfc" name="*" type="application/vnd.vmware.vcloud.vdcStorageProfile+xml"/>\n    <Link rel="down" href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435/owner" type="application/vnd.vmware.vcloud.owner+xml"/>\n    <Link rel="down" href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>\n    <Link rel="down" href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435/productSections/" type="application/vnd.vmware.vcloud.productSections+xml"/>\n    <Description/>\n    <Owner type="application/vnd.vmware.vcloud.owner+xml">\n        <User href="https://localhost/api/admin/user/4e1905dc-7c0b-4013-b763-d01960853f49" name="system" type="application/vnd.vmware.admin.user+xml"/>\n    </Owner>\n    <Children>\n        <Vm goldMaster="false" status="8" name="Ubuntu_no_nic" id="urn:vcloud:vm:bd3fe155-3fb2-40a8-af48-89c276983166" href="https://localhost/api/vAppTemplate/vm-bd3fe155-3fb2-40a8-af48-89c276983166" type="application/vnd.vmware.vcloud.vm+xml">\n            <Link rel="up" href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435" type="application/vnd.vmware.vcloud.vAppTemplate+xml"/>\n            <Link rel="storageProfile" href="https://localhost/api/vdcStorageProfile/950701fb-2b8a-4808-80f1-27d1170a2bfc" type="application/vnd.vmware.vcloud.vdcStorageProfile+xml"/>\n            
<Link rel="down" href="https://localhost/api/vAppTemplate/vm-bd3fe155-3fb2-40a8-af48-89c276983166/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>\n            <Link rel="down" href="https://localhost/api/vAppTemplate/vm-bd3fe155-3fb2-40a8-af48-89c276983166/productSections/" type="application/vnd.vmware.vcloud.productSections+xml"/>\n            <Description/>\n            <NetworkConnectionSection href="https://localhost/api/vAppTemplate/vm-bd3fe155-3fb2-40a8-af48-89c276983166/networkConnectionSection/" type="application/vnd.vmware.vcloud.networkConnectionSection+xml" ovf:required="false">\n                <ovf:Info>Specifies the available VM network connections</ovf:Info>\n            </NetworkConnectionSection>\n            <GuestCustomizationSection href="https://localhost/api/vAppTemplate/vm-bd3fe155-3fb2-40a8-af48-89c276983166/guestCustomizationSection/" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml" ovf:required="false">\n                <ovf:Info>Specifies Guest OS Customization Settings</ovf:Info>\n                <Enabled>true</Enabled>\n                <ChangeSid>false</ChangeSid>\n                <VirtualMachineId>bd3fe155-3fb2-40a8-af48-89c276983166</VirtualMachineId>\n                <JoinDomainEnabled>false</JoinDomainEnabled>\n                <UseOrgSettings>false</UseOrgSettings>\n                <AdminPasswordEnabled>false</AdminPasswordEnabled>\n                <AdminPasswordAuto>true</AdminPasswordAuto>\n                <AdminAutoLogonEnabled>false</AdminAutoLogonEnabled>\n                <AdminAutoLogonCount>0</AdminAutoLogonCount>\n                <ResetPasswordRequired>false</ResetPasswordRequired>\n                <ComputerName>Ubuntunonic-001</ComputerName>\n            </GuestCustomizationSection>\n            <ovf:VirtualHardwareSection xmlns:vcloud="http://www.vmware.com/vcloud/v1.5" ovf:transport="" vcloud:type="application/vnd.vmware.vcloud.virtualHardwareSection+xml" 
vcloud:href="https://localhost/api/vAppTemplate/vm-bd3fe155-3fb2-40a8-af48-89c276983166/virtualHardwareSection/">\n                <ovf:Info>Virtual hardware requirements</ovf:Info>\n                <ovf:System>\n                    <vssd:ElementName>Virtual Hardware Family</vssd:ElementName>\n                    <vssd:InstanceID>0</vssd:InstanceID>\n                    <vssd:VirtualSystemIdentifier>Ubuntu_no_nic</vssd:VirtualSystemIdentifier>\n                    <vssd:VirtualSystemType>vmx-11</vssd:VirtualSystemType>\n                </ovf:System>\n                <ovf:Item>\n                    <rasd:Address>0</rasd:Address>\n                    <rasd:Description>SCSI Controller</rasd:Description>\n                    <rasd:ElementName>SCSI Controller 0</rasd:ElementName>\n                    <rasd:InstanceID>1</rasd:InstanceID>\n                    <rasd:ResourceSubType>lsilogic</rasd:ResourceSubType>\n                    <rasd:ResourceType>6</rasd:ResourceType>\n                </ovf:Item>\n                <ovf:Item>\n                    <rasd:AddressOnParent>0</rasd:AddressOnParent>\n                    <rasd:Description>Hard disk</rasd:Description>\n                    <rasd:ElementName>Hard disk 1</rasd:ElementName>\n                    <rasd:HostResource vcloud:storageProfileHref="https://localhost/api/vdcStorageProfile/950701fb-2b8a-4808-80f1-27d1170a2bfc" vcloud:busType="6" vcloud:busSubType="lsilogic" vcloud:capacity="5120" vcloud:storageProfileOverrideVmDefault="false"/>\n                    <rasd:InstanceID>2000</rasd:InstanceID>\n                    <rasd:Parent>1</rasd:Parent>\n                    <rasd:ResourceType>17</rasd:ResourceType>\n                    <rasd:VirtualQuantity>5368709120</rasd:VirtualQuantity>\n                    <rasd:VirtualQuantityUnits>byte</rasd:VirtualQuantityUnits>\n                </ovf:Item>\n                <ovf:Item>\n                    <rasd:Address>1</rasd:Address>\n                    <rasd:Description>IDE 
Controller</rasd:Description>\n                    <rasd:ElementName>IDE Controller 1</rasd:ElementName>\n                    <rasd:InstanceID>2</rasd:InstanceID>\n                    <rasd:ResourceType>5</rasd:ResourceType>\n                </ovf:Item>\n                <ovf:Item>\n                    <rasd:AddressOnParent>0</rasd:AddressOnParent>\n                    <rasd:AutomaticAllocation>false</rasd:AutomaticAllocation>\n                    <rasd:Description>CD/DVD Drive</rasd:Description>\n                    <rasd:ElementName>CD/DVD Drive 1</rasd:ElementName>\n                    <rasd:HostResource/>\n                    <rasd:InstanceID>3002</rasd:InstanceID>\n                    <rasd:Parent>2</rasd:Parent>\n                    <rasd:ResourceType>15</rasd:ResourceType>\n                </ovf:Item>\n                <ovf:Item>\n                    <rasd:AddressOnParent>0</rasd:AddressOnParent>\n                    <rasd:AutomaticAllocation>false</rasd:AutomaticAllocation>\n                    <rasd:Description>Floppy Drive</rasd:Description>\n                    <rasd:ElementName>Floppy Drive 1</rasd:ElementName>\n                    <rasd:HostResource/>\n                    <rasd:InstanceID>8000</rasd:InstanceID>\n                    <rasd:ResourceType>14</rasd:ResourceType>\n                </ovf:Item>\n                <ovf:Item>\n                    <rasd:AllocationUnits>hertz * 10^6</rasd:AllocationUnits>\n                    <rasd:Description>Number of Virtual CPUs</rasd:Description>\n                    <rasd:ElementName>1 virtual CPU(s)</rasd:ElementName>\n                    <rasd:InstanceID>3</rasd:InstanceID>\n                    <rasd:Reservation>0</rasd:Reservation>\n                    <rasd:ResourceType>3</rasd:ResourceType>\n                    <rasd:VirtualQuantity>1</rasd:VirtualQuantity>\n                    <rasd:Weight>0</rasd:Weight>\n                    <vmw:CoresPerSocket ovf:required="false">1</vmw:CoresPerSocket>\n                
</ovf:Item>\n                <ovf:Item>\n                    <rasd:AllocationUnits>byte * 2^20</rasd:AllocationUnits>\n                    <rasd:Description>Memory Size</rasd:Description>\n                    <rasd:ElementName>1024 MB of memory</rasd:ElementName>\n                    <rasd:InstanceID>4</rasd:InstanceID>\n                    <rasd:Reservation>0</rasd:Reservation>\n                    <rasd:ResourceType>4</rasd:ResourceType>\n                    <rasd:VirtualQuantity>1024</rasd:VirtualQuantity>\n                    <rasd:Weight>0</rasd:Weight>\n                </ovf:Item>\n                <Link rel="down" href="https://localhost/api/vAppTemplate/vm-bd3fe155-3fb2-40a8-af48-89c276983166/virtualHardwareSection/cpu" type="application/vnd.vmware.vcloud.rasdItem+xml"/>\n                <Link rel="down" href="https://localhost/api/vAppTemplate/vm-bd3fe155-3fb2-40a8-af48-89c276983166/virtualHardwareSection/memory" type="application/vnd.vmware.vcloud.rasdItem+xml"/>\n                <Link rel="down" href="https://localhost/api/vAppTemplate/vm-bd3fe155-3fb2-40a8-af48-89c276983166/virtualHardwareSection/disks" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/>\n                <Link rel="down" href="https://localhost/api/vAppTemplate/vm-bd3fe155-3fb2-40a8-af48-89c276983166/virtualHardwareSection/media" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/>\n                <Link rel="down" href="https://localhost/api/vAppTemplate/vm-bd3fe155-3fb2-40a8-af48-89c276983166/virtualHardwareSection/networkCards" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/>\n                <Link rel="down" href="https://localhost/api/vAppTemplate/vm-bd3fe155-3fb2-40a8-af48-89c276983166/virtualHardwareSection/serialPorts" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/>\n            </ovf:VirtualHardwareSection>\n            <VAppScopedLocalId>Ubuntu_no_nic</VAppScopedLocalId>\n            <DateCreated>2017-10-14T23:52:58.790-07:00</DateCreated>\n        
</Vm>\n    </Children>\n    <ovf:NetworkSection xmlns:vcloud="http://www.vmware.com/vcloud/v1.5" vcloud:type="application/vnd.vmware.vcloud.networkSection+xml" vcloud:href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435/networkSection/">\n        <ovf:Info>The list of logical networks</ovf:Info>\n    </ovf:NetworkSection>\n    <NetworkConfigSection href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435/networkConfigSection/" type="application/vnd.vmware.vcloud.networkConfigSection+xml" ovf:required="false">\n        <ovf:Info>The configuration parameters for logical networks</ovf:Info>\n    </NetworkConfigSection>\n    <LeaseSettingsSection href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435/leaseSettingsSection/" type="application/vnd.vmware.vcloud.leaseSettingsSection+xml" ovf:required="false">\n        <ovf:Info>Lease settings section</ovf:Info>\n        <Link rel="edit" href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435/leaseSettingsSection/" type="application/vnd.vmware.vcloud.leaseSettingsSection+xml"/>\n        <StorageLeaseInSeconds>7776000</StorageLeaseInSeconds>\n        <StorageLeaseExpiration>2018-08-22T02:41:54.567-07:00</StorageLeaseExpiration>\n    </LeaseSettingsSection>\n    <CustomizationSection goldMaster="false" href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435/customizationSection/" type="application/vnd.vmware.vcloud.customizationSection+xml" ovf:required="false">\n        <ovf:Info>VApp template customization section</ovf:Info>\n        <CustomizeOnInstantiate>true</CustomizeOnInstantiate>\n    </CustomizationSection>\n    <DateCreated>2017-10-14T23:52:58.790-07:00</DateCreated>\n</VAppTemplate>\n"""\r
-\r
-deployed_vapp_xml = """<?xml version="1.0" encoding="UTF-8"?>\n<VApp xmlns="http://www.vmware.com/vcloud/v1.5" ovfDescriptorUploaded="true" deployed="false" status="0" name="Test1_vm-978d608b-07e4-4733-9c15-b66bc8ee310a" id="urn:vcloud:vapp:8b3ab861-cc53-4bd8-bdd0-85a74af76c61" href="https://localhost/api/vApp/vapp-8b3ab861-cc53-4bd8-bdd0-85a74af76c61" type="application/vnd.vmware.vcloud.vApp+xml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">\n    <Link rel="down" href="https://localhost/api/vApp/vapp-8b3ab861-cc53-4bd8-bdd0-85a74af76c61/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\n    <Link rel="up" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69" type="application/vnd.vmware.vcloud.vdc+xml"/>\n    <Link rel="down" href="https://localhost/api/vApp/vapp-8b3ab861-cc53-4bd8-bdd0-85a74af76c61/owner" type="application/vnd.vmware.vcloud.owner+xml"/>\n    <Link rel="down" href="https://localhost/api/vApp/vapp-8b3ab861-cc53-4bd8-bdd0-85a74af76c61/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>\n    <Link rel="ovf" href="https://localhost/api/vApp/vapp-8b3ab861-cc53-4bd8-bdd0-85a74af76c61/ovf" type="text/xml"/>\n    <Link rel="down" href="https://localhost/api/vApp/vapp-8b3ab861-cc53-4bd8-bdd0-85a74af76c61/productSections/" type="application/vnd.vmware.vcloud.productSections+xml"/>\n    <Description>Vapp instantiation</Description>\n    <Tasks>\n        <Task cancelRequested="false" expiryTime="2018-08-31T01:14:34.292-07:00" operation="Creating Virtual Application Test1_vm-978d608b-07e4-4733-9c15-b66bc8ee310a(8b3ab861-cc53-4bd8-bdd0-85a74af76c61)" operationName="vdcInstantiateVapp" serviceNamespace="com.vmware.vcloud" startTime="2018-06-02T01:14:34.292-07:00" status="queued" name="task" id="urn:vcloud:task:1d588451-6b7d-43f4-b8c7-c9155dcd715a" 
href="https://localhost/api/task/1d588451-6b7d-43f4-b8c7-c9155dcd715a" type="application/vnd.vmware.vcloud.task+xml">\n            <Owner href="https://localhost/api/vApp/vapp-8b3ab861-cc53-4bd8-bdd0-85a74af76c61" name="Test1_vm-978d608b-07e4-4733-9c15-b66bc8ee310a" type="application/vnd.vmware.vcloud.vApp+xml"/>\n            <User href="https://localhost/api/admin/user/f7b6beba-96db-4674-b187-675ed1873c8c" name="orgadmin" type="application/vnd.vmware.admin.user+xml"/>\n            <Organization href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac" name="Org3" type="application/vnd.vmware.vcloud.org+xml"/>\n            <Progress>1</Progress>\n            <Details/>\n        </Task>\n    </Tasks>\n    <DateCreated>2018-06-02T01:14:32.870-07:00</DateCreated>\n    <Owner type="application/vnd.vmware.vcloud.owner+xml">\n        <User href="https://localhost/api/admin/user/f7b6beba-96db-4674-b187-675ed1873c8c" name="orgadmin" type="application/vnd.vmware.admin.user+xml"/>\n    </Owner>\n    <InMaintenanceMode>false</InMaintenanceMode>\n</VApp>"""\r
diff --git a/osm_ro/utils.py b/osm_ro/utils.py
deleted file mode 100644 (file)
index 625ff6d..0000000
+++ /dev/null
@@ -1,417 +0,0 @@
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-'''
-utils is a module that implements functions that are used by all openmano modules,
-dealing with aspects such as reading/writing files, formatting inputs/outputs for quick translation
-from dictionaries to appropriate database dictionaries, etc.
-'''
-__author__="Alfonso Tierno, Gerardo Garcia"
-__date__ ="$08-sep-2014 12:21:22$"
-
-import datetime
-import time
-import warnings
-from functools import reduce, partial, wraps
-from itertools import tee
-
-import six
-from six.moves import filter, filterfalse
-
-from jsonschema import exceptions as js_e
-from jsonschema import validate as js_v
-
-if six.PY3:
-    from inspect import getfullargspec as getspec
-else:
-    from inspect import getargspec as getspec
-
-#from bs4 import BeautifulSoup
-
-def read_file(file_to_read):
-    """Reads a file specified by 'file_to_read' and returns (True,<its content as a string>) in case of success or (False, <error message>) in case of failure"""
-    try:
-        f = open(file_to_read, 'r')
-        read_data = f.read()
-        f.close()
-    except Exception as e:
-        return (False, str(e))
-
-    return (True, read_data)
-
-def write_file(file_to_write, text):
-    """Write a file specified by 'file_to_write' and returns (True,NOne) in case of success or (False, <error message>) in case of failure"""
-    try:
-        f = open(file_to_write, 'w')
-        f.write(text)
-        f.close()
-    except Exception as e:
-        return (False, str(e))
-
-    return (True, None)
-
-def format_in(http_response, schema):
-    try:
-        client_data = http_response.json()
-        js_v(client_data, schema)
-        #print "Input data: ", str(client_data)
-        return True, client_data
-    except js_e.ValidationError as exc:
-        print "validate_in error, jsonschema exception ", exc.message, "at", exc.path
-        return False, ("validate_in error, jsonschema exception ", exc.message, "at", exc.path)
-
-def remove_extra_items(data, schema):
-    deleted = []
-    if type(data) is tuple or type(data) is list:
-        for d in data:
-            a = remove_extra_items(d, schema['items'])
-            if a is not None:
-                deleted.append(a)
-    elif type(data) is dict:
-        # TODO deal with patternProperties
-        if 'properties' not in schema:
-            return None
-        for k in data.keys():
-            if k in schema['properties'].keys():
-                a = remove_extra_items(data[k], schema['properties'][k])
-                if a is not None:
-                    deleted.append({k: a})
-            elif not schema.get('additionalProperties'):
-                del data[k]
-                deleted.append(k)
-    if len(deleted) == 0:
-        return None
-    elif len(deleted) == 1:
-        return deleted[0]
-
-    return deleted
-
-#def format_html2text(http_content):
-#    soup=BeautifulSoup(http_content)
-#    text = soup.p.get_text() + " " + soup.pre.get_text()
-#    return text
-
-
-def delete_nulls(var):
-    if type(var) is dict:
-        for k in var.keys():
-            if var[k] is None: del var[k]
-            elif type(var[k]) is dict or type(var[k]) is list or type(var[k]) is tuple:
-                if delete_nulls(var[k]): del var[k]
-        if len(var) == 0: return True
-    elif type(var) is list or type(var) is tuple:
-        for k in var:
-            if type(k) is dict: delete_nulls(k)
-        if len(var) == 0: return True
-    return False
-
-
-def convert_bandwidth(data, reverse=False):
-    '''Check the field bandwidth recursivelly and when found, it removes units and convert to number
-    It assumes that bandwidth is well formed
-    Attributes:
-        'data': dictionary bottle.FormsDict variable to be checked. None or empty is consideted valid
-        'reverse': by default convert form str to int (Mbps), if True it convert from number to units
-    Return:
-        None
-    '''
-    if type(data) is dict:
-        for k in data.keys():
-            if type(data[k]) is dict or type(data[k]) is tuple or type(data[k]) is list:
-                convert_bandwidth(data[k], reverse)
-        if "bandwidth" in data:
-            try:
-                value=str(data["bandwidth"])
-                if not reverse:
-                    pos = value.find("bps")
-                    if pos>0:
-                        if value[pos-1]=="G": data["bandwidth"] =  int(data["bandwidth"][:pos-1]) * 1000
-                        elif value[pos-1]=="k": data["bandwidth"]= int(data["bandwidth"][:pos-1]) / 1000
-                        else: data["bandwidth"]= int(data["bandwidth"][:pos-1])
-                else:
-                    value = int(data["bandwidth"])
-                    if value % 1000 == 0: data["bandwidth"]=str(value/1000) + " Gbps"
-                    else: data["bandwidth"]=str(value) + " Mbps"
-            except:
-                print "convert_bandwidth exception for type", type(data["bandwidth"]), " data", data["bandwidth"]
-                return
-    if type(data) is tuple or type(data) is list:
-        for k in data:
-            if type(k) is dict or type(k) is tuple or type(k) is list:
-                convert_bandwidth(k, reverse)
-
-def convert_float_timestamp2str(var):
-    '''Converts timestamps (created_at, modified_at fields) represented as float
-    to a string with the format '%Y-%m-%dT%H:%i:%s'
-    It enters recursively in the dict var finding this kind of variables
-    '''
-    if type(var) is dict:
-        for k,v in var.items():
-            if type(v) is float and k in ("created_at", "modified_at"):
-                var[k] = time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime(v) )
-            elif type(v) is dict or type(v) is list or type(v) is tuple:
-                convert_float_timestamp2str(v)
-        if len(var) == 0: return True
-    elif type(var) is list or type(var) is tuple:
-        for v in var:
-            convert_float_timestamp2str(v)
-
-def convert_datetime2str(var):
-    '''Converts a datetime variable to a string with the format '%Y-%m-%dT%H:%i:%s'
-    It enters recursively in the dict var finding this kind of variables
-    '''
-    if type(var) is dict:
-        for k,v in var.items():
-            if type(v) is datetime.datetime:
-                var[k]= v.strftime('%Y-%m-%dT%H:%M:%S')
-            elif type(v) is dict or type(v) is list or type(v) is tuple:
-                convert_datetime2str(v)
-        if len(var) == 0: return True
-    elif type(var) is list or type(var) is tuple:
-        for v in var:
-            convert_datetime2str(v)
-
-def convert_str2boolean(data, items):
-    '''Check recursively the content of data, and if there is an key contained in items, convert value from string to boolean
-    Done recursively
-    Attributes:
-        'data': dictionary variable to be checked. None or empty is considered valid
-        'items': tuple of keys to convert
-    Return:
-        None
-    '''
-    if type(data) is dict:
-        for k in data.keys():
-            if type(data[k]) is dict or type(data[k]) is tuple or type(data[k]) is list:
-                convert_str2boolean(data[k], items)
-            if k in items:
-                if type(data[k]) is str:
-                    if   data[k]=="false" or data[k]=="False": data[k]=False
-                    elif data[k]=="true"  or data[k]=="True":  data[k]=True
-    if type(data) is tuple or type(data) is list:
-        for k in data:
-            if type(k) is dict or type(k) is tuple or type(k) is list:
-                convert_str2boolean(k, items)
-
-def check_valid_uuid(uuid):
-    id_schema = {"type" : "string", "pattern": "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$"}
-    id_schema2 = {"type" : "string", "pattern": "^[a-fA-F0-9]{32}$"}
-    try:
-        js_v(uuid, id_schema)
-        return True
-    except js_e.ValidationError:
-        try:
-            js_v(uuid, id_schema2)
-            return True
-        except js_e.ValidationError:
-            return False
-    return False
-
-
-def expand_brackets(text):
-    """
-    Change a text with TEXT[ABC..] into a list with [TEXTA, TEXTB, TEXC, ...
-    if no bracket is used it just return the a list with the single text
-    It uses recursivity to allow several [] in the text
-    :param text:
-    :return:
-    """
-    if text is None:
-        return (None, )
-    start = text.find("[")
-    end = text.find("]")
-    if start < 0 or end < 0:
-        return [text]
-    text_list = []
-    for char in text[start+1:end]:
-        text_list += expand_brackets(text[:start] + char + text[end+1:])
-    return text_list
-
-def deprecated(message):
-  def deprecated_decorator(func):
-      def deprecated_func(*args, **kwargs):
-          warnings.warn("{} is a deprecated function. {}".format(func.__name__, message),
-                        category=DeprecationWarning,
-                        stacklevel=2)
-          warnings.simplefilter('default', DeprecationWarning)
-          return func(*args, **kwargs)
-      return deprecated_func
-  return deprecated_decorator
-
-
-def truncate(text, max_length=1024):
-    """Limit huge texts in number of characters"""
-    text = str(text)
-    if text and len(text) >= max_length:
-        return text[:max_length//2-3] + " ... " + text[-max_length//2+3:]
-    return text
-
-
-def merge_dicts(*dicts, **kwargs):
-    """Creates a new dict merging N others and keyword arguments.
-    Right-most dicts take precedence.
-    Keyword args take precedence.
-    """
-    return reduce(
-        lambda acc, x: acc.update(x) or acc,
-        list(dicts) + [kwargs], {})
-
-
-def remove_none_items(adict):
-    """Return a similar dict without keys associated to None values"""
-    return {k: v for k, v in adict.items() if v is not None}
-
-
-def filter_dict_keys(adict, allow):
-    """Return a similar dict, but just containing the explicitly allowed keys
-
-    Arguments:
-        adict (dict): Simple python dict data struct
-        allow (list): Explicits allowed keys
-    """
-    return {k: v for k, v in adict.items() if k in allow}
-
-
-def filter_out_dict_keys(adict, deny):
-    """Return a similar dict, but not containing the explicitly denied keys
-
-    Arguments:
-        adict (dict): Simple python dict data struct
-        deny (list): Explicits denied keys
-    """
-    return {k: v for k, v in adict.items() if k not in deny}
-
-
-def expand_joined_fields(record):
-    """Given a db query result, explode the fields that contains `.` (join
-    operations).
-
-    Example
-        >> expand_joined_fiels({'wim.id': 2})
-        # {'wim': {'id': 2}}
-    """
-    result = {}
-    for field, value in record.items():
-        keys = field.split('.')
-        target = result
-        target = reduce(lambda target, key: target.setdefault(key, {}),
-                        keys[:-1], result)
-        target[keys[-1]] = value
-
-    return result
-
-
-def ensure(condition, exception):
-    """Raise an exception if condition is not met"""
-    if not condition:
-        raise exception
-
-
-def partition(predicate, iterable):
-    """Create two derived iterators from a single one
-    The first iterator created will loop thought the values where the function
-    predicate is True, the second one will iterate over the values where it is
-    false.
-    """
-    iterable1, iterable2 = tee(iterable)
-    return filter(predicate, iterable2), filterfalse(predicate, iterable1)
-
-
-def pipe(*functions):
-    """Compose functions of one argument in the opposite order,
-    So pipe(f, g)(x) = g(f(x))
-    """
-    return lambda x: reduce(lambda acc, f: f(acc), functions, x)
-
-
-def compose(*functions):
-    """Compose functions of one argument,
-    So compose(f, g)(x) = f(g(x))
-    """
-    return lambda x: reduce(lambda acc, f: f(acc), functions[::-1], x)
-
-
-def safe_get(target, key_path, default=None):
-    """Given a path of keys (eg.: "key1.key2.key3"), return a nested value in
-    a nested dict if present, or the default value
-    """
-    keys = key_path.split('.')
-    target = reduce(lambda acc, key: acc.get(key) or {}, keys[:-1], target)
-    return target.get(keys[-1], default)
-
-
-class Attempt(object):
-    """Auxiliary class to be used in an attempt to retry executing a failing
-    procedure
-
-    Attributes:
-        count (int): 0-based "retries" counter
-        max_attempts (int): maximum number of "retries" allowed
-        info (dict): extra information about the specific attempt
-            (can be used to produce more meaningful error messages)
-    """
-    __slots__ = ('count', 'max', 'info')
-
-    MAX = 3
-
-    def __init__(self, count=0, max_attempts=MAX, info=None):
-        self.count = count
-        self.max = max_attempts
-        self.info = info or {}
-
-    @property
-    def countdown(self):
-        """Like count, but in the opposite direction"""
-        return self.max - self.count
-
-    @property
-    def number(self):
-        """1-based counter"""
-        return self.count + 1
-
-
-def inject_args(fn=None, **args):
-    """Partially apply keyword arguments in a function, but only if the function
-    define them in the first place
-    """
-    if fn is None:  # Allows calling the decorator directly or with parameters
-        return partial(inject_args, **args)
-
-    spec = getspec(fn)
-    return wraps(fn)(partial(fn, **filter_dict_keys(args, spec.args)))
-
-
-def get_arg(name, fn, args, kwargs):
-    """Find the value of an argument for a function, given its argument list.
-
-    This function can be used to display more meaningful errors for debugging
-    """
-    if name in kwargs:
-        return kwargs[name]
-
-    spec = getspec(fn)
-    if name in spec.args:
-        i = spec.args.index(name)
-        return args[i] if i < len(args) else None
-
-    return None
diff --git a/osm_ro/vim_thread.py b/osm_ro/vim_thread.py
deleted file mode 100644 (file)
index 38a73d1..0000000
+++ /dev/null
@@ -1,1316 +0,0 @@
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openvim
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-""""
-This is thread that interacts with a VIM. It processes TASKs sequentially against a single VIM.
-The tasks are stored at database in table vim_wim_actions
-Several vim_wim_actions can refer to the same element at VIM (flavor, network, ...). This is somethng to avoid if RO
-is migrated to a non-relational database as mongo db. Each vim_wim_actions reference a different instance_Xxxxx
-In this case "related" colunm contains the same value, to know they refer to the same vim. In case of deletion, it
-there is related tasks using this element, it is not deleted, The vim_info needed to delete is transfered to other task
-
-The task content is (M: stored at memory, D: stored at database):
-    MD  instance_action_id:  reference a global action over an instance-scenario: database instance_actions
-    MD  task_index:     index number of the task. This together with the previous forms a unique key identifier
-    MD  datacenter_vim_id:  should contain the uuid of the VIM managed by this thread
-    MD  vim_id:     id of the vm,net,etc at VIM
-    MD  item:       database table name, can be instance_vms, instance_nets, TODO: datacenter_flavors, datacenter_images
-    MD  item_id:    uuid of the referenced entry in the previous table
-    MD  action:     CREATE, DELETE, FIND
-    MD  status:     SCHEDULED: action need to be done
-                    BUILD: not used
-                    DONE: Done and it must be polled to VIM periodically to see status. ONLY for action=CREATE or FIND
-                    FAILED: It cannot be created/found/deleted
-                    FINISHED: similar to DONE, but no refresh is needed anymore. Task is maintained at database but
-                        it is never processed by any thread
-                    SUPERSEDED: similar to FINSISHED, but nothing has been done to completed the task.
-    MD  extra:      text with yaml format at database, dict at memory with:
-            params:     list with the params to be sent to the VIM for CREATE or FIND. For DELETE the vim_id is taken
-                        from other related tasks
-            find:       (only for CREATE tasks) if present it should FIND before creating and use if existing. Contains
-                        the FIND params
-            depends_on: list with the 'task_index'es of tasks that must be completed before. e.g. a vm creation depends
-                        on a net creation
-                        can contain an int (single index on the same instance-action) or str (compete action ID)
-            sdn_net_id: used for net.
-            interfaces: used for VMs. Each key is the uuid of the instance_interfaces entry at database
-                iface_id: uuid of intance_interfaces
-                sdn_port_id:
-                sdn_net_id:
-                vim_info
-            created_items: dictionary with extra elements created that need to be deleted. e.g. ports, volumes,...
-            created:    False if the VIM element is not created by other actions, and it should not be deleted
-            vim_status: VIM status of the element. Stored also at database in the instance_XXX
-            vim_info:   Detailed information of a vm/net from the VIM. Stored at database in the instance_XXX but not at
-                        vim_wim_actions
-    M   depends:    dict with task_index(from depends_on) to dependency task
-    M   params:     same as extra[params]
-    MD  error_msg:  descriptive text upon an error.Stored also at database instance_XXX
-    MD  created_at: task creation time. The task of creation must be the oldest
-    MD  modified_at: next time task need to be processed. For example, for a refresh, it contain next time refresh must
-                     be done
-    MD related:     All the tasks over the same VIM element have same "related". Note that other VIMs can contain the
-                    same value of related, but this thread only process those task of one VIM.  Also related can be the
-                    same among several NS os isntance-scenarios
-    MD worker:      Used to lock in case of several thread workers.
-
-"""
-
-import threading
-import time
-import Queue
-import logging
-import vimconn
-import vimconn_openvim
-import vimconn_aws
-import vimconn_opennebula
-import vimconn_openstack
-import vimconn_vmware
-import vimconn_fos
-import vimconn_azure
-import yaml
-from db_base import db_base_Exception
-from lib_osm_openvim.ovim import ovimException
-from copy import deepcopy
-
-__author__ = "Alfonso Tierno, Pablo Montes"
-__date__ = "$28-Sep-2017 12:07:15$"
-
-vim_module = {
-    "openvim": vimconn_openvim,
-    "aws": vimconn_aws,
-    "opennebula": vimconn_opennebula,
-    "openstack": vimconn_openstack,
-    "vmware": vimconn_vmware,
-    "fos": vimconn_fos,
-    "azure": vimconn_azure,
-}
-
-
-def is_task_id(task_id):
-    return task_id.startswith("TASK-")
-
-
-class VimThreadException(Exception):
-    pass
-
-
-class VimThreadExceptionNotFound(VimThreadException):
-    pass
-
-
-class vim_thread(threading.Thread):
-    REFRESH_BUILD = 5  # 5 seconds
-    REFRESH_ACTIVE = 60  # 1 minute
-    REFRESH_ERROR = 600
-    REFRESH_DELETE = 3600 * 10
-
-    def __init__(self, task_lock, name=None, datacenter_name=None, datacenter_tenant_id=None,
-                 db=None, db_lock=None, ovim=None):
-        """Init a thread.
-        Arguments:
-            'id' number of thead
-            'name' name of thread
-            'host','user':  host ip or name to manage and user
-            'db', 'db_lock': database class and lock to use it in exclusion
-        """
-        threading.Thread.__init__(self)
-        self.vim = None
-        self.error_status = None
-        self.datacenter_name = datacenter_name
-        self.datacenter_tenant_id = datacenter_tenant_id
-        self.ovim = ovim
-        if not name:
-            self.name = vimconn["id"] + "." + vimconn["config"]["datacenter_tenant_id"]
-        else:
-            self.name = name
-        self.vim_persistent_info = {}
-        self.my_id = self.name[:64]
-
-        self.logger = logging.getLogger('openmano.vim.' + self.name)
-        self.db = db
-        self.db_lock = db_lock
-
-        self.task_lock = task_lock
-        self.task_queue = Queue.Queue(2000)
-
-    def get_vimconnector(self):
-        try:
-            from_ = "datacenter_tenants as dt join datacenters as d on dt.datacenter_id=d.uuid"
-            select_ = ('type', 'd.config as config', 'd.uuid as datacenter_id', 'vim_url', 'vim_url_admin',
-                       'd.name as datacenter_name', 'dt.uuid as datacenter_tenant_id',
-                       'dt.vim_tenant_name as vim_tenant_name', 'dt.vim_tenant_id as vim_tenant_id',
-                       'user', 'passwd', 'dt.config as dt_config')
-            where_ = {"dt.uuid": self.datacenter_tenant_id}
-            vims = self.db.get_rows(FROM=from_, SELECT=select_, WHERE=where_)
-            vim = vims[0]
-            vim_config = {}
-            if vim["config"]:
-                vim_config.update(yaml.load(vim["config"]))
-            if vim["dt_config"]:
-                vim_config.update(yaml.load(vim["dt_config"]))
-            vim_config['datacenter_tenant_id'] = vim.get('datacenter_tenant_id')
-            vim_config['datacenter_id'] = vim.get('datacenter_id')
-
-            # get port_mapping
-            with self.db_lock:
-                vim_config["wim_external_ports"] = self.ovim.get_of_port_mappings(
-                    db_filter={"region": vim_config['datacenter_id'], "pci": None})
-
-            self.vim = vim_module[vim["type"]].vimconnector(
-                uuid=vim['datacenter_id'], name=vim['datacenter_name'],
-                tenant_id=vim['vim_tenant_id'], tenant_name=vim['vim_tenant_name'],
-                url=vim['vim_url'], url_admin=vim['vim_url_admin'],
-                user=vim['user'], passwd=vim['passwd'],
-                config=vim_config, persistent_info=self.vim_persistent_info
-            )
-            self.error_status = None
-        except Exception as e:
-            self.logger.error("Cannot load vimconnector for vim_account {}: {}".format(self.datacenter_tenant_id, e))
-            self.vim = None
-            self.error_status = "Error loading vimconnector: {}".format(e)
-
-    def _get_db_task(self):
-        """
-        Read actions from database and reload them at memory. Fill self.refresh_list, pending_list, vim_actions
-        :return: None
-        """
-        now = time.time()
-        try:
-            database_limit = 20
-            task_related = None
-            while True:
-                # get 20 (database_limit) entries each time
-                vim_actions = self.db.get_rows(FROM="vim_wim_actions",
-                                               WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
-                                                      "status": ['SCHEDULED', 'BUILD', 'DONE'],
-                                                      "worker": [None, self.my_id], "modified_at<=": now
-                                                      },
-                                               ORDER_BY=("modified_at", "created_at",),
-                                               LIMIT=database_limit)
-                if not vim_actions:
-                    return None, None
-                # if vim_actions[0]["modified_at"] > now:
-                #     return int(vim_actions[0] - now)
-                for task in vim_actions:
-                    # block related task
-                    if task_related == task["related"]:
-                        continue  # ignore if a locking has already tried for these task set
-                    task_related = task["related"]
-                    # lock ...
-                    self.db.update_rows("vim_wim_actions", UPDATE={"worker": self.my_id}, modified_time=0,
-                                        WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
-                                               "status": ['SCHEDULED', 'BUILD', 'DONE', 'FAILED'],
-                                               "worker": [None, self.my_id],
-                                               "related": task_related,
-                                               "item": task["item"],
-                                               })
-                    # ... and read all related and check if locked
-                    related_tasks = self.db.get_rows(FROM="vim_wim_actions",
-                                                     WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
-                                                            "status": ['SCHEDULED', 'BUILD', 'DONE', 'FAILED'],
-                                                            "related": task_related,
-                                                            "item": task["item"],
-                                                            },
-                                                     ORDER_BY=("created_at",))
-                    # check that all related tasks have been locked. If not release and try again. It can happen
-                    # for race conditions if a new related task has been inserted by nfvo in the process
-                    some_tasks_locked = False
-                    some_tasks_not_locked = False
-                    creation_task = None
-                    for relate_task in related_tasks:
-                        if relate_task["worker"] != self.my_id:
-                            some_tasks_not_locked = True
-                        else:
-                            some_tasks_locked = True
-                        if not creation_task and relate_task["action"] in ("CREATE", "FIND"):
-                            creation_task = relate_task
-                    if some_tasks_not_locked:
-                        if some_tasks_locked:  # unlock
-                            self.db.update_rows("vim_wim_actions", UPDATE={"worker": None}, modified_time=0,
-                                                WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
-                                                       "worker": self.my_id,
-                                                       "related": task_related,
-                                                       "item": task["item"],
-                                                       })
-                        continue
-
-                    # task of creation must be the first in the list of related_task
-                    assert(related_tasks[0]["action"] in ("CREATE", "FIND"))
-
-                    task["params"] = None
-                    if task["extra"]:
-                        extra = yaml.load(task["extra"])
-                    else:
-                        extra = {}
-                    task["extra"] = extra
-                    if extra.get("depends_on"):
-                        task["depends"] = {}
-                    if extra.get("params"):
-                        task["params"] = deepcopy(extra["params"])
-                    return task, related_tasks
-        except Exception as e:
-            self.logger.critical("Unexpected exception at _get_db_task: " + str(e), exc_info=True)
-            return None, None
-
-    def _delete_task(self, task):
-        """
-        Determine if this task need to be done or superseded
-        :return: None
-        """
-
-        def copy_extra_created(copy_to, copy_from):
-            copy_to["created"] = copy_from["created"]
-            if copy_from.get("sdn_net_id"):
-                copy_to["sdn_net_id"] = copy_from["sdn_net_id"]
-            if copy_from.get("interfaces"):
-                copy_to["interfaces"] = copy_from["interfaces"]
-            if copy_from.get("created_items"):
-                if not copy_to.get("created_items"):
-                    copy_to["created_items"] = {}
-                copy_to["created_items"].update(copy_from["created_items"])
-
-        task_create = None
-        dependency_task = None
-        deletion_needed = False
-        if task["status"] == "FAILED":
-            return   # TODO need to be retry??
-        try:
-            # get all related tasks
-            related_tasks = self.db.get_rows(FROM="vim_wim_actions",
-                                             WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
-                                                    "status": ['SCHEDULED', 'BUILD', 'DONE', 'FAILED'],
-                                                    "action": ["FIND", "CREATE"],
-                                                    "related": task["related"],
-                                                    },
-                                             ORDER_BY=("created_at",),
-                                             )
-            for related_task in related_tasks:
-                if related_task["item"] == task["item"] and related_task["item_id"] == task["item_id"]:
-                    task_create = related_task
-                    # TASK_CREATE
-                    if related_task["extra"]:
-                        extra_created = yaml.load(related_task["extra"])
-                        if extra_created.get("created"):
-                            deletion_needed = True
-                        related_task["extra"] = extra_created
-                elif not dependency_task:
-                    dependency_task = related_task
-                if task_create and dependency_task:
-                    break
-
-            # mark task_create as FINISHED
-            self.db.update_rows("vim_wim_actions", UPDATE={"status": "FINISHED"},
-                                WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
-                                       "instance_action_id": task_create["instance_action_id"],
-                                       "task_index": task_create["task_index"]
-                                       })
-            if not deletion_needed:
-                return
-            elif dependency_task:
-                # move create information  from task_create to relate_task
-                extra_new_created = yaml.load(dependency_task["extra"]) or {}
-                extra_new_created["created"] = extra_created["created"]
-                copy_extra_created(copy_to=extra_new_created, copy_from=extra_created)
-
-                self.db.update_rows("vim_wim_actions",
-                                    UPDATE={"extra": yaml.safe_dump(extra_new_created, default_flow_style=True,
-                                                                    width=256),
-                                            "vim_id": task_create.get("vim_id")},
-                                    WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
-                                           "instance_action_id": dependency_task["instance_action_id"],
-                                           "task_index": dependency_task["task_index"]
-                                           })
-                return False
-            else:
-                task["vim_id"] = task_create["vim_id"]
-                copy_extra_created(copy_to=task["extra"], copy_from=task_create["extra"])
-                return True
-
-        except Exception as e:
-            self.logger.critical("Unexpected exception at _delete_task: " + str(e), exc_info=True)
-
-    def _refres_vm(self, task):
-        """Call VIM to get VMs status"""
-        database_update = None
-
-        vim_id = task["vim_id"]
-        vm_to_refresh_list = [vim_id]
-        try:
-            vim_dict = self.vim.refresh_vms_status(vm_to_refresh_list)
-            vim_info = vim_dict[vim_id]
-        except vimconn.vimconnException as e:
-            # Mark all tasks at VIM_ERROR status
-            self.logger.error("task=several get-VM: vimconnException when trying to refresh vms " + str(e))
-            vim_info = {"status": "VIM_ERROR", "error_msg": str(e)}
-
-        task_id = task["instance_action_id"] + "." + str(task["task_index"])
-        self.logger.debug("task={} get-VM: vim_vm_id={} result={}".format(task_id, task["vim_id"], vim_info))
-
-        # check and update interfaces
-        task_warning_msg = ""
-        for interface in vim_info.get("interfaces", ()):
-            vim_interface_id = interface["vim_interface_id"]
-            if vim_interface_id not in task["extra"]["interfaces"]:
-                self.logger.critical("task={} get-VM: Interface not found {} on task info {}".format(
-                    task_id, vim_interface_id, task["extra"]["interfaces"]), exc_info=True)
-                continue
-            task_interface = task["extra"]["interfaces"][vim_interface_id]
-            task_vim_interface = task_interface.get("vim_info")
-            if task_vim_interface != interface:
-                # delete old port
-                if task_interface.get("sdn_port_id"):
-                    try:
-                        with self.db_lock:
-                            self.ovim.delete_port(task_interface["sdn_port_id"], idempotent=True)
-                            task_interface["sdn_port_id"] = None
-                    except ovimException as e:
-                        error_text = "ovimException deleting external_port={}: {}".format(
-                            task_interface["sdn_port_id"], e)
-                        self.logger.error("task={} get-VM: {}".format(task_id, error_text), exc_info=True)
-                        task_warning_msg += error_text
-                        # TODO Set error_msg at instance_nets instead of instance VMs
-
-                # Create SDN port
-                sdn_net_id = task_interface.get("sdn_net_id")
-                if sdn_net_id and interface.get("compute_node") and interface.get("pci"):
-                    sdn_port_name = sdn_net_id + "." + task["vim_id"]
-                    sdn_port_name = sdn_port_name[:63]
-                    try:
-                        with self.db_lock:
-                            sdn_port_id = self.ovim.new_external_port(
-                                {"compute_node": interface["compute_node"],
-                                    "pci": interface["pci"],
-                                    "vlan": interface.get("vlan"),
-                                    "net_id": sdn_net_id,
-                                    "region": self.vim["config"]["datacenter_id"],
-                                    "name": sdn_port_name,
-                                    "mac": interface.get("mac_address")})
-                            task_interface["sdn_port_id"] = sdn_port_id
-                    except (ovimException, Exception) as e:
-                        error_text = "ovimException creating new_external_port compute_node={} pci={} vlan={} {}".\
-                            format(interface["compute_node"], interface["pci"], interface.get("vlan"), e)
-                        self.logger.error("task={} get-VM: {}".format(task_id, error_text), exc_info=True)
-                        task_warning_msg += error_text
-                        # TODO Set error_msg at instance_nets instead of instance VMs
-
-                self.db.update_rows('instance_interfaces',
-                                    UPDATE={"mac_address": interface.get("mac_address"),
-                                            "ip_address": interface.get("ip_address"),
-                                            "vim_interface_id": interface.get("vim_interface_id"),
-                                            "vim_info": interface.get("vim_info"),
-                                            "sdn_port_id": task_interface.get("sdn_port_id"),
-                                            "compute_node": interface.get("compute_node"),
-                                            "pci": interface.get("pci"),
-                                            "vlan": interface.get("vlan")},
-                                    WHERE={'uuid': task_interface["iface_id"]})
-                task_interface["vim_info"] = interface
-
-        # check and update task and instance_vms database
-        vim_info_error_msg = None
-        if vim_info.get("error_msg"):
-            vim_info_error_msg = self._format_vim_error_msg(vim_info["error_msg"] + task_warning_msg)
-        elif task_warning_msg:
-            vim_info_error_msg = self._format_vim_error_msg(task_warning_msg)
-        task_vim_info = task["extra"].get("vim_info")
-        task_error_msg = task.get("error_msg")
-        task_vim_status = task["extra"].get("vim_status")
-        if task_vim_status != vim_info["status"] or task_error_msg != vim_info_error_msg or \
-                (vim_info.get("vim_info") and task_vim_info != vim_info["vim_info"]):
-            database_update = {"status": vim_info["status"], "error_msg": vim_info_error_msg}
-            if vim_info.get("vim_info"):
-                database_update["vim_info"] = vim_info["vim_info"]
-
-            task["extra"]["vim_status"] = vim_info["status"]
-            task["error_msg"] = vim_info_error_msg
-            if vim_info.get("vim_info"):
-                task["extra"]["vim_info"] = vim_info["vim_info"]
-
-        return database_update
-
    def _refres_net(self, task):
        """Call VIM to get network status.

        :param task: refresh task over an instance_nets entry
        :return: dict with the fields to update at the instance_nets table, or
            None if nothing changed since the last refresh
        """
        # NOTE(review): name keeps the original misspelling ("refres"); it is
        # dispatched by _proccess_pending_tasks, so it is not renamed here.
        database_update = None

        vim_id = task["vim_id"]
        net_to_refresh_list = [vim_id]
        try:
            vim_dict = self.vim.refresh_nets_status(net_to_refresh_list)
            vim_info = vim_dict[vim_id]
        except vimconn.vimconnException as e:
            # Mark all tasks at VIM_ERROR status
            self.logger.error("task=several get-net: vimconnException when trying to refresh nets " + str(e))
            vim_info = {"status": "VIM_ERROR", "error_msg": str(e)}

        task_id = task["instance_action_id"] + "." + str(task["task_index"])
        self.logger.debug("task={} get-net: vim_net_id={} result={}".format(task_id, task["vim_id"], vim_info))

        # last values recorded at the task; used below to detect changes
        task_vim_info = task["extra"].get("vim_info")
        task_vim_status = task["extra"].get("vim_status")
        task_error_msg = task.get("error_msg")
        task_sdn_net_id = task["extra"].get("sdn_net_id")

        vim_info_status = vim_info["status"]
        vim_info_error_msg = vim_info.get("error_msg")
        # get ovim status
        if task_sdn_net_id:
            # network has an SDN counterpart: combine both statuses
            try:
                with self.db_lock:
                    sdn_net = self.ovim.show_network(task_sdn_net_id)
            except (ovimException, Exception) as e:
                text_error = "ovimException getting network snd_net_id={}: {}".format(task_sdn_net_id, e)
                self.logger.error("task={} get-net: {}".format(task_id, text_error), exc_info=True)
                sdn_net = {"status": "ERROR", "last_error": text_error}
            if sdn_net["status"] == "ERROR":
                if not vim_info_error_msg:
                    vim_info_error_msg = str(sdn_net.get("last_error"))
                else:
                    # join both error texts; each half is truncated so the
                    # combined message stays within 1024 chars (presumably the
                    # database column size — TODO confirm against the schema)
                    vim_info_error_msg = "VIM_ERROR: {} && SDN_ERROR: {}".format(
                        self._format_vim_error_msg(vim_info_error_msg, 1024 // 2 - 14),
                        self._format_vim_error_msg(sdn_net["last_error"], 1024 // 2 - 14))
                vim_info_status = "ERROR"
            elif sdn_net["status"] == "BUILD":
                # SDN part still building: downgrade an ACTIVE VIM status
                if vim_info_status == "ACTIVE":
                    vim_info_status = "BUILD"

        # update database
        if vim_info_error_msg:
            vim_info_error_msg = self._format_vim_error_msg(vim_info_error_msg)
        if task_vim_status != vim_info_status or task_error_msg != vim_info_error_msg or \
                (vim_info.get("vim_info") and task_vim_info != vim_info["vim_info"]):
            # something changed: annotate the task and return the DB changes
            task["extra"]["vim_status"] = vim_info_status
            task["error_msg"] = vim_info_error_msg
            if vim_info.get("vim_info"):
                task["extra"]["vim_info"] = vim_info["vim_info"]
            database_update = {"status": vim_info_status, "error_msg": vim_info_error_msg}
            if vim_info.get("vim_info"):
                database_update["vim_info"] = vim_info["vim_info"]
        return database_update
-
-    def _proccess_pending_tasks(self, task, related_tasks):
-        old_task_status = task["status"]
-        create_or_find = False   # if as result of processing this task something is created or found
-        next_refresh = 0
-
-        try:
-            if task["status"] == "SCHEDULED":
-                # check if tasks that this depends on have been completed
-                dependency_not_completed = False
-                dependency_modified_at = 0
-                for task_index in task["extra"].get("depends_on", ()):
-                    task_dependency = self._look_for_task(task["instance_action_id"], task_index)
-                    if not task_dependency:
-                        raise VimThreadException(
-                            "Cannot get depending net task trying to get depending task {}.{}".format(
-                                task["instance_action_id"], task_index))
-                    # task["depends"]["TASK-" + str(task_index)] = task_dependency #it references another object,so
-                    # database must be look again
-                    if task_dependency["status"] == "SCHEDULED":
-                        dependency_not_completed = True
-                        dependency_modified_at = task_dependency["modified_at"]
-                        break
-                    elif task_dependency["status"] == "FAILED":
-                        raise VimThreadException(
-                            "Cannot {} {}, (task {}.{}) because depends on failed {}.{}, (task{}.{}): {}".format(
-                                task["action"], task["item"],
-                                task["instance_action_id"], task["task_index"],
-                                task_dependency["instance_action_id"], task_dependency["task_index"],
-                                task_dependency["action"], task_dependency["item"], task_dependency.get("error_msg")))
-
-                    task["depends"]["TASK-"+str(task_index)] = task_dependency
-                    task["depends"]["TASK-{}.{}".format(task["instance_action_id"], task_index)] = task_dependency
-                if dependency_not_completed:
-                    # Move this task to the time dependency is going to be modified plus 10 seconds.
-                    self.db.update_rows("vim_wim_actions", modified_time=dependency_modified_at + 10,
-                                        UPDATE={"worker": None},
-                                        WHERE={"datacenter_vim_id": self.datacenter_tenant_id, "worker": self.my_id,
-                                               "related": task["related"],
-                                               })
-                    # task["extra"]["tries"] = task["extra"].get("tries", 0) + 1
-                    # if task["extra"]["tries"] > 3:
-                    #     raise VimThreadException(
-                    #         "Cannot {} {}, (task {}.{}) because timeout waiting to complete {} {}, "
-                    #         "(task {}.{})".format(task["action"], task["item"],
-                    #                               task["instance_action_id"], task["task_index"],
-                    #                               task_dependency["instance_action_id"], task_dependency["task_index"]
-                    #                               task_dependency["action"], task_dependency["item"]))
-                    return
-
-            database_update = None
-            if task["action"] == "DELETE":
-                deleted_needed = self._delete_task(task)
-                if not deleted_needed:
-                    task["status"] = "SUPERSEDED"  # with FINISHED instead of DONE it will not be refreshing
-                    task["error_msg"] = None
-
-            if task["status"] == "SUPERSEDED":
-                # not needed to do anything but update database with the new status
-                database_update = None
-            elif not self.vim:
-                task["status"] = "FAILED"
-                task["error_msg"] = self.error_status
-                database_update = {"status": "VIM_ERROR", "error_msg": task["error_msg"]}
-            elif task["item_id"] != related_tasks[0]["item_id"] and task["action"] in ("FIND", "CREATE"):
-                # Do nothing, just copy values from one to another and updata database
-                task["status"] = related_tasks[0]["status"]
-                task["error_msg"] = related_tasks[0]["error_msg"]
-                task["vim_id"] = related_tasks[0]["vim_id"]
-                extra = yaml.load(related_tasks[0]["extra"])
-                task["extra"]["vim_status"] = extra.get("vim_status")
-                next_refresh = related_tasks[0]["modified_at"] + 0.001
-                database_update = {"status": task["extra"].get("vim_status", "VIM_ERROR"),
-                                   "error_msg": task["error_msg"]}
-                if task["item"] == 'instance_vms':
-                    database_update["vim_vm_id"] = task["vim_id"]
-                elif task["item"] == 'instance_nets':
-                    database_update["vim_net_id"] = task["vim_id"]
-            elif task["item"] == 'instance_vms':
-                if task["status"] in ('BUILD', 'DONE') and task["action"] in ("FIND", "CREATE"):
-                    database_update = self._refres_vm(task)
-                    create_or_find = True
-                elif task["action"] == "CREATE":
-                    create_or_find = True
-                    database_update = self.new_vm(task)
-                elif task["action"] == "DELETE":
-                    self.del_vm(task)
-                else:
-                    raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
-            elif task["item"] == 'instance_nets':
-                if task["status"] in ('BUILD', 'DONE') and task["action"] in ("FIND", "CREATE"):
-                    database_update = self._refres_net(task)
-                    create_or_find = True
-                elif task["action"] == "CREATE":
-                    create_or_find = True
-                    database_update = self.new_net(task)
-                elif task["action"] == "DELETE":
-                    self.del_net(task)
-                elif task["action"] == "FIND":
-                    database_update = self.get_net(task)
-                else:
-                    raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
-            elif task["item"] == 'instance_sfis':
-                if task["action"] == "CREATE":
-                    create_or_find = True
-                    database_update = self.new_sfi(task)
-                elif task["action"] == "DELETE":
-                    self.del_sfi(task)
-                else:
-                    raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
-            elif task["item"] == 'instance_sfs':
-                if task["action"] == "CREATE":
-                    create_or_find = True
-                    database_update = self.new_sf(task)
-                elif task["action"] == "DELETE":
-                    self.del_sf(task)
-                else:
-                    raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
-            elif task["item"] == 'instance_classifications':
-                if task["action"] == "CREATE":
-                    create_or_find = True
-                    database_update = self.new_classification(task)
-                elif task["action"] == "DELETE":
-                    self.del_classification(task)
-                else:
-                    raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
-            elif task["item"] == 'instance_sfps':
-                if task["action"] == "CREATE":
-                    create_or_find = True
-                    database_update = self.new_sfp(task)
-                elif task["action"] == "DELETE":
-                    self.del_sfp(task)
-                else:
-                    raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
-            else:
-                raise vimconn.vimconnException(self.name + "unknown task item {}".format(task["item"]))
-                # TODO
-        except VimThreadException as e:
-            task["error_msg"] = str(e)
-            task["status"] = "FAILED"
-            database_update = {"status": "VIM_ERROR", "error_msg": task["error_msg"]}
-            if task["item"] == 'instance_vms':
-                database_update["vim_vm_id"] = None
-            elif task["item"] == 'instance_nets':
-                database_update["vim_net_id"] = None
-
-        task_id = task["instance_action_id"] + "." + str(task["task_index"])
-        self.logger.debug("task={} item={} action={} result={}:'{}' params={}".format(
-            task_id, task["item"], task["action"], task["status"],
-            task["vim_id"] if task["status"] == "DONE" else task.get("error_msg"), task["params"]))
-        try:
-            if not next_refresh:
-                if task["status"] == "DONE":
-                    next_refresh = time.time()
-                    if task["extra"].get("vim_status") == "BUILD":
-                        next_refresh += self.REFRESH_BUILD
-                    elif task["extra"].get("vim_status") in ("ERROR", "VIM_ERROR"):
-                        next_refresh += self.REFRESH_ERROR
-                    elif task["extra"].get("vim_status") == "DELETED":
-                        next_refresh += self.REFRESH_DELETE
-                    else:
-                        next_refresh += self.REFRESH_ACTIVE
-                elif task["status"] == "FAILED":
-                    next_refresh = time.time() + self.REFRESH_DELETE
-
-            if create_or_find:
-                # modify all related task with action FIND/CREATED non SCHEDULED
-                self.db.update_rows(
-                    table="vim_wim_actions", modified_time=next_refresh + 0.001,
-                    UPDATE={"status": task["status"], "vim_id": task.get("vim_id"),
-                            "error_msg": task["error_msg"],
-                            },
-
-                    WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
-                           "worker": self.my_id,
-                           "action": ["FIND", "CREATE"],
-                           "related": task["related"],
-                           "status<>": "SCHEDULED",
-                           })
-            # modify own task
-            self.db.update_rows(
-                table="vim_wim_actions", modified_time=next_refresh,
-                UPDATE={"status": task["status"], "vim_id": task.get("vim_id"),
-                        "error_msg": task["error_msg"],
-                        "extra": yaml.safe_dump(task["extra"], default_flow_style=True, width=256)},
-                WHERE={"instance_action_id": task["instance_action_id"], "task_index": task["task_index"]})
-            # Unlock tasks
-            self.db.update_rows(
-                table="vim_wim_actions", modified_time=0,
-                UPDATE={"worker": None},
-                WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
-                       "worker": self.my_id,
-                       "related": task["related"],
-                       })
-
-            # Update table instance_actions
-            if old_task_status == "SCHEDULED" and task["status"] != old_task_status:
-                self.db.update_rows(
-                    table="instance_actions",
-                    UPDATE={("number_failed" if task["status"] == "FAILED" else "number_done"): {"INCREMENT": 1}},
-                    WHERE={"uuid": task["instance_action_id"]})
-            if database_update:
-                where_filter = {"related": task["related"]}
-                if task["item"] == "instance_nets" and task["datacenter_vim_id"]:
-                    where_filter["datacenter_tenant_id"] = task["datacenter_vim_id"] 
-                self.db.update_rows(table=task["item"],
-                                    UPDATE=database_update,
-                                    WHERE=where_filter)
-        except db_base_Exception as e:
-            self.logger.error("task={} Error updating database {}".format(task_id, e), exc_info=True)
-
-    def insert_task(self, task):
-        try:
-            self.task_queue.put(task, False)
-            return None
-        except Queue.Full:
-            raise vimconn.vimconnException(self.name + ": timeout inserting a task")
-
-    def del_task(self, task):
-        with self.task_lock:
-            if task["status"] == "SCHEDULED":
-                task["status"] = "SUPERSEDED"
-                return True
-            else:  # task["status"] == "processing"
-                self.task_lock.release()
-                return False
-
    def run(self):
        """Thread main loop.

        Processes control messages from the local queue and executes pending
        tasks taken from the database. Queue control strings: 'exit'
        terminates the thread; 'reload' re-creates the vimconnector and
        restarts the inner loop.
        """
        self.logger.debug("Starting")
        while True:
            self.get_vimconnector()
            self.logger.debug("Vimconnector loaded")
            reload_thread = False

            while True:
                try:
                    # drain control messages first
                    while not self.task_queue.empty():
                        task = self.task_queue.get()
                        if isinstance(task, list):
                            pass  # list messages are accepted but ignored here
                        elif isinstance(task, str):
                            if task == 'exit':
                                return 0
                            elif task == 'reload':
                                reload_thread = True
                                break
                        # NOTE(review): task_done() is skipped for 'exit'/'reload'
                        # (early return/break) — confirm whether join() is used
                        # on this queue anywhere
                        self.task_queue.task_done()
                    if reload_thread:
                        break  # leave inner loop; outer loop reloads the vimconnector

                    # take ownership of one database task and process it
                    task, related_tasks = self._get_db_task()
                    if task:
                        self._proccess_pending_tasks(task, related_tasks)
                    else:
                        time.sleep(5)  # nothing pending; poll the database again later

                except Exception as e:
                    # catch-all so an unexpected error never kills the thread
                    self.logger.critical("Unexpected exception at run: " + str(e), exc_info=True)

        self.logger.debug("Finishing")
-
-    def _look_for_task(self, instance_action_id, task_id):
-        """
-        Look for a concrete task at vim_actions database table
-        :param instance_action_id: The instance_action_id
-        :param task_id: Can have several formats:
-            <task index>: integer
-            TASK-<task index> :backward compatibility,
-            [TASK-]<instance_action_id>.<task index>: this instance_action_id overrides the one in the parameter
-        :return: Task dictionary or None if not found
-        """
-        if isinstance(task_id, int):
-            task_index = task_id
-        else:
-            if task_id.startswith("TASK-"):
-                task_id = task_id[5:]
-            ins_action_id, _, task_index = task_id.rpartition(".")
-            if ins_action_id:
-                instance_action_id = ins_action_id
-
-        tasks = self.db.get_rows(FROM="vim_wim_actions", WHERE={"instance_action_id": instance_action_id,
-                                                                "task_index": task_index})
-        if not tasks:
-            return None
-        task = tasks[0]
-        task["params"] = None
-        task["depends"] = {}
-        if task["extra"]:
-            extra = yaml.load(task["extra"])
-            task["extra"] = extra
-            task["params"] = extra.get("params")
-        else:
-            task["extra"] = {}
-        return task
-
-    @staticmethod
-    def _format_vim_error_msg(error_text, max_length=1024):
-        if error_text and len(error_text) >= max_length:
-            return error_text[:max_length // 2 - 3] + " ... " + error_text[-max_length // 2 + 3:]
-        return error_text
-
    def new_vm(self, task):
        """Create a VM at the VIM for a CREATE task of instance_vms.

        :param task: the CREATE task; task["params"] holds the new_vminstance
            arguments (params[5] is the interface list)
        :return: dict with the fields to update at the instance_vms table
        """
        task_id = task["instance_action_id"] + "." + str(task["task_index"])
        try:
            params = task["params"]
            depends = task.get("depends")
            net_list = params[5]
            # resolve interface net_ids that still reference a task instead of a VIM network
            for net in net_list:
                if "net_id" in net and is_task_id(net["net_id"]):  # change task_id into network_id
                    network_id = task["depends"][net["net_id"]].get("vim_id")
                    if not network_id:
                        raise VimThreadException(
                            "Cannot create VM because depends on a network not created or found: " +
                            str(depends[net["net_id"]]["error_msg"]))
                    net["net_id"] = network_id
            # deep copy — presumably so the vimconnector cannot mutate the
            # params stored at the task; confirm before removing
            params_copy = deepcopy(params)
            vim_vm_id, created_items = self.vim.new_vminstance(*params_copy)

            # fill task_interfaces. Look for snd_net_id at database for each interface
            task_interfaces = {}
            for iface in params_copy[5]:
                task_interfaces[iface["vim_id"]] = {"iface_id": iface["uuid"]}
                result = self.db.get_rows(
                    SELECT=('sdn_net_id', 'interface_id'),
                    FROM='instance_nets as ine join instance_interfaces as ii on ii.instance_net_id=ine.uuid',
                    WHERE={'ii.uuid': iface["uuid"]})
                if result:
                    task_interfaces[iface["vim_id"]]["sdn_net_id"] = result[0]['sdn_net_id']
                    task_interfaces[iface["vim_id"]]["interface_id"] = result[0]['interface_id']
                else:
                    self.logger.critical("task={} new-VM: instance_nets uuid={} not found at DB".format(task_id,
                                                                                                        iface["uuid"]),
                                         exc_info=True)

            # annotate the task as successfully created; refresh loop takes it from BUILD
            task["vim_info"] = {}
            task["extra"]["interfaces"] = task_interfaces
            task["extra"]["created"] = True
            task["extra"]["created_items"] = created_items
            task["extra"]["vim_status"] = "BUILD"
            task["error_msg"] = None
            task["status"] = "DONE"
            task["vim_id"] = vim_vm_id
            instance_element_update = {"status": "BUILD", "vim_vm_id": vim_vm_id, "error_msg": None}
            return instance_element_update

        except (vimconn.vimconnException, VimThreadException) as e:
            self.logger.error("task={} new-VM: {}".format(task_id, e))
            error_text = self._format_vim_error_msg(str(e))
            task["error_msg"] = error_text
            task["status"] = "FAILED"
            task["vim_id"] = None
            instance_element_update = {"status": "VIM_ERROR", "vim_vm_id": None, "error_msg": error_text}
            return instance_element_update
-
-    def del_vm(self, task):
-        task_id = task["instance_action_id"] + "." + str(task["task_index"])
-        vm_vim_id = task["vim_id"]
-        interfaces = task["extra"].get("interfaces", ())
-        try:
-            for iface in interfaces.values():
-                if iface.get("sdn_port_id"):
-                    try:
-                        with self.db_lock:
-                            self.ovim.delete_port(iface["sdn_port_id"], idempotent=True)
-                    except ovimException as e:
-                        self.logger.error("task={} del-VM: ovimException when deleting external_port={}: {} ".format(
-                            task_id, iface["sdn_port_id"], e), exc_info=True)
-                        # TODO Set error_msg at instance_nets
-
-            self.vim.delete_vminstance(vm_vim_id, task["extra"].get("created_items"))
-            task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
-            task["error_msg"] = None
-            return None
-
-        except vimconn.vimconnException as e:
-            task["error_msg"] = self._format_vim_error_msg(str(e))
-            if isinstance(e, vimconn.vimconnNotFoundException):
-                # If not found mark as Done and fill error_msg
-                task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
-                return None
-            task["status"] = "FAILED"
-            return None
-
-    def _get_net_internal(self, task, filter_param):
-        """
-        Common code for get_net and new_net. It looks for a network on VIM with the filter_params
-        :param task: task for this find or find-or-create action
-        :param filter_param: parameters to send to the vimconnector
-        :return: a dict with the content to update the instance_nets database table. Raises an exception on error, or
-            when network is not found or found more than one
-        """
-        vim_nets = self.vim.get_network_list(filter_param)
-        if not vim_nets:
-            raise VimThreadExceptionNotFound("Network not found with this criteria: '{}'".format(filter_param))
-        elif len(vim_nets) > 1:
-            raise VimThreadException("More than one network found with this criteria: '{}'".format(filter_param))
-        vim_net_id = vim_nets[0]["id"]
-
-        # Discover if this network is managed by a sdn controller
-        sdn_net_id = None
-        result = self.db.get_rows(SELECT=('sdn_net_id',), FROM='instance_nets',
-                                  WHERE={'vim_net_id': vim_net_id, 'datacenter_tenant_id': self.datacenter_tenant_id},
-                                  ORDER="instance_scenario_id")
-        if result:
-            sdn_net_id = result[0]['sdn_net_id']
-
-        task["status"] = "DONE"
-        task["extra"]["vim_info"] = {}
-        task["extra"]["created"] = False
-        task["extra"]["vim_status"] = "BUILD"
-        task["extra"]["sdn_net_id"] = sdn_net_id
-        task["error_msg"] = None
-        task["vim_id"] = vim_net_id
-        instance_element_update = {"vim_net_id": vim_net_id, "created": False, "status": "BUILD",
-                                   "error_msg": None, "sdn_net_id": sdn_net_id}
-        return instance_element_update
-
-    def get_net(self, task):
-        task_id = task["instance_action_id"] + "." + str(task["task_index"])
-        try:
-            params = task["params"]
-            filter_param = params[0]
-            instance_element_update = self._get_net_internal(task, filter_param)
-            return instance_element_update
-
-        except (vimconn.vimconnException, VimThreadException) as e:
-            self.logger.error("task={} get-net: {}".format(task_id, e))
-            task["status"] = "FAILED"
-            task["vim_id"] = None
-            task["error_msg"] = self._format_vim_error_msg(str(e))
-            instance_element_update = {"vim_net_id": None, "status": "VIM_ERROR",
-                                       "error_msg": task["error_msg"]}
-            return instance_element_update
-
    def new_net(self, task):
        """Find or create a network for a CREATE task of instance_nets.

        When task["extra"]["find"] is present, first tries to locate an
        existing network and only creates one if not found. If an SDN
        controller is configured and the network type is data/ptp, the ovim
        counterpart network is created too, optionally attaching an external
        port towards the WIM.
        :param task: the CREATE task
        :return: dict with the fields to update at the instance_nets table
        """
        vim_net_id = None
        sdn_net_id = None
        task_id = task["instance_action_id"] + "." + str(task["task_index"])
        action_text = ""  # remembers the current phase, used for error reporting
        try:
            # FIND
            if task["extra"].get("find"):
                action_text = "finding"
                filter_param = task["extra"]["find"][0]
                try:
                    instance_element_update = self._get_net_internal(task, filter_param)
                    return instance_element_update
                except VimThreadExceptionNotFound:
                    pass  # not found: fall through and create it
            # CREATE
            params = task["params"]
            action_text = "creating VIM"
            vim_net_id, created_items = self.vim.new_network(*params[0:3])

            net_name = params[0]
            net_type = params[1]
            wim_account_name = None
            if len(params) >= 4:
                wim_account_name = params[3]

            sdn_controller = self.vim.config.get('sdn-controller')
            if sdn_controller and (net_type == "data" or net_type == "ptp"):
                # NOTE(review): self.vim["config"] subscripts the vimconnector —
                # assumes it supports __getitem__; confirm against vimconn base class
                network = {"name": net_name, "type": net_type, "region": self.vim["config"]["datacenter_id"]}

                vim_net = self.vim.get_network(vim_net_id)
                if vim_net.get('encapsulation') != 'vlan':
                    raise vimconn.vimconnException(
                        "net '{}' defined as type '{}' has not vlan encapsulation '{}'".format(
                            net_name, net_type, vim_net['encapsulation']))
                network["vlan"] = vim_net.get('segmentation_id')
                action_text = "creating SDN"
                with self.db_lock:
                    sdn_net_id = self.ovim.new_network(network)

                if wim_account_name and self.vim.config["wim_external_ports"]:
                    # add external port to connect WIM. Try with compute node __WIM:wim_name and __WIM
                    action_text = "attaching external port to ovim network"
                    sdn_port_name = "external_port"
                    sdn_port_data = {
                        "compute_node": "__WIM:" + wim_account_name[0:58],
                        "pci": None,
                        "vlan": network["vlan"],
                        "net_id": sdn_net_id,
                        "region": self.vim["config"]["datacenter_id"],
                        "name": sdn_port_name,
                    }
                    try:
                        with self.db_lock:
                            sdn_external_port_id = self.ovim.new_external_port(sdn_port_data)
                    except ovimException:
                        # retry with the generic "__WIM" compute node
                        sdn_port_data["compute_node"] = "__WIM"
                        with self.db_lock:
                            sdn_external_port_id = self.ovim.new_external_port(sdn_port_data)
                    self.logger.debug("Added sdn_external_port {} to sdn_network {}".format(sdn_external_port_id,
                                                                                            sdn_net_id))
            # annotate the task as successfully created; refresh loop takes it from BUILD
            task["status"] = "DONE"
            task["extra"]["vim_info"] = {}
            task["extra"]["sdn_net_id"] = sdn_net_id
            task["extra"]["vim_status"] = "BUILD"
            task["extra"]["created"] = True
            task["extra"]["created_items"] = created_items
            task["error_msg"] = None
            task["vim_id"] = vim_net_id
            instance_element_update = {"vim_net_id": vim_net_id, "sdn_net_id": sdn_net_id, "status": "BUILD",
                                       "created": True, "error_msg": None}
            return instance_element_update
        except (vimconn.vimconnException, ovimException) as e:
            self.logger.error("task={} new-net: Error {}: {}".format(task_id, action_text, e))
            task["status"] = "FAILED"
            task["vim_id"] = vim_net_id
            task["error_msg"] = self._format_vim_error_msg(str(e))
            task["extra"]["sdn_net_id"] = sdn_net_id
            instance_element_update = {"vim_net_id": vim_net_id, "sdn_net_id": sdn_net_id, "status": "VIM_ERROR",
                                       "error_msg": task["error_msg"]}
            return instance_element_update
-
    def del_net(self, task):
        """Delete a network for a DELETE task of instance_nets.

        Deletes the VIM network and, if it had an SDN counterpart, the ovim
        network together with any port still attached to it.
        :param task: the DELETE task
        :return: None (status/error_msg are annotated in the task itself)
        """
        net_vim_id = task["vim_id"]
        sdn_net_id = task["extra"].get("sdn_net_id")
        try:
            if net_vim_id:
                self.vim.delete_network(net_vim_id, task["extra"].get("created_items"))
            if sdn_net_id:
                # Delete any attached port to this sdn network. There can be ports associated to this network in case
                # it was manually done using 'openmano vim-net-sdn-attach'
                with self.db_lock:
                    port_list = self.ovim.get_ports(columns={'uuid'},
                                                    filter={'name': 'external_port', 'net_id': sdn_net_id})
                    for port in port_list:
                        self.ovim.delete_port(port['uuid'], idempotent=True)
                    self.ovim.delete_network(sdn_net_id, idempotent=True)
            task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
            task["error_msg"] = None
            return None
        except ovimException as e:
            task["error_msg"] = self._format_vim_error_msg("ovimException obtaining and deleting external "
                                                           "ports for net {}: {}".format(sdn_net_id, str(e)))
        except vimconn.vimconnException as e:
            task["error_msg"] = self._format_vim_error_msg(str(e))
            if isinstance(e, vimconn.vimconnNotFoundException):
                # If not found mark as Done and fill error_msg
                task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
                return None
        # fall-through from either exception (not-found excluded): mark as failed
        task["status"] = "FAILED"
        return None
-
-    # Service Function Instances
-    def new_sfi(self, task):
-        vim_sfi_id = None
-        try:
-            # Waits for interfaces to be ready (avoids failure)
-            time.sleep(1)
-            dep_id = "TASK-" + str(task["extra"]["depends_on"][0])
-            task_id = task["instance_action_id"] + "." + str(task["task_index"])
-            error_text = ""
-            interfaces = task["depends"][dep_id]["extra"].get("interfaces")
-
-            ingress_interface_id = task.get("extra").get("params").get("ingress_interface_id")
-            egress_interface_id = task.get("extra").get("params").get("egress_interface_id")
-            ingress_vim_interface_id = None
-            egress_vim_interface_id = None
-            for vim_interface, interface_data in interfaces.iteritems():
-                if interface_data.get("interface_id") == ingress_interface_id:
-                    ingress_vim_interface_id = vim_interface
-                    break
-            if ingress_interface_id != egress_interface_id:
-                for vim_interface, interface_data in interfaces.iteritems():
-                    if interface_data.get("interface_id") == egress_interface_id:
-                        egress_vim_interface_id = vim_interface
-                        break
-            else:
-                egress_vim_interface_id = ingress_vim_interface_id
-            if not ingress_vim_interface_id or not egress_vim_interface_id:
-                error_text = "Error creating Service Function Instance, Ingress: {}, Egress: {}".format(
-                    ingress_vim_interface_id, egress_vim_interface_id)
-                self.logger.error(error_text)
-                task["error_msg"] = error_text
-                task["status"] = "FAILED"
-                task["vim_id"] = None
-                return None
-            # At the moment, every port associated with the VM will be used both as ingress and egress ports.
-            # Bear in mind that different VIM connectors might support SFI differently. In the case of OpenStack,
-            # only the first ingress and first egress ports will be used to create the SFI (Port Pair).
-            ingress_port_id_list = [ingress_vim_interface_id]
-            egress_port_id_list = [egress_vim_interface_id]
-            name = "sfi-%s" % task["item_id"][:8]
-            # By default no form of IETF SFC Encapsulation will be used
-            vim_sfi_id = self.vim.new_sfi(name, ingress_port_id_list, egress_port_id_list, sfc_encap=False)
-
-            task["extra"]["created"] = True
-            task["extra"]["vim_status"] = "ACTIVE"
-            task["error_msg"] = None
-            task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
-            task["vim_id"] = vim_sfi_id
-            instance_element_update = {"status": "ACTIVE", "vim_sfi_id": vim_sfi_id, "error_msg": None}
-            return instance_element_update
-
-        except (vimconn.vimconnException, VimThreadException) as e:
-            self.logger.error("Error creating Service Function Instance, task=%s: %s", task_id, str(e))
-            error_text = self._format_vim_error_msg(str(e))
-            task["error_msg"] = error_text
-            task["status"] = "FAILED"
-            task["vim_id"] = None
-            instance_element_update = {"status": "VIM_ERROR", "vim_sfi_id": None, "error_msg": error_text}
-            return instance_element_update
-
-    def del_sfi(self, task):
-        sfi_vim_id = task["vim_id"]
-        try:
-            self.vim.delete_sfi(sfi_vim_id)
-            task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
-            task["error_msg"] = None
-            return None
-
-        except vimconn.vimconnException as e:
-            task["error_msg"] = self._format_vim_error_msg(str(e))
-            if isinstance(e, vimconn.vimconnNotFoundException):
-                # If not found mark as Done and fill error_msg
-                task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
-                return None
-            task["status"] = "FAILED"
-            return None
-
-    def new_sf(self, task):
-        vim_sf_id = None
-        try:
-            task_id = task["instance_action_id"] + "." + str(task["task_index"])
-            error_text = ""
-            depending_tasks = ["TASK-" + str(dep_id) for dep_id in task["extra"]["depends_on"]]
-            # sfis = task.get("depends").values()[0].get("extra").get("params")[5]
-            sfis = [task.get("depends").get(dep_task) for dep_task in depending_tasks]
-            sfi_id_list = []
-            for sfi in sfis:
-                sfi_id_list.append(sfi.get("vim_id"))
-            name = "sf-%s" % task["item_id"][:8]
-            # By default no form of IETF SFC Encapsulation will be used
-            vim_sf_id = self.vim.new_sf(name, sfi_id_list, sfc_encap=False)
-
-            task["extra"]["created"] = True
-            task["extra"]["vim_status"] = "ACTIVE"
-            task["error_msg"] = None
-            task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
-            task["vim_id"] = vim_sf_id
-            instance_element_update = {"status": "ACTIVE", "vim_sf_id": vim_sf_id, "error_msg": None}
-            return instance_element_update
-
-        except (vimconn.vimconnException, VimThreadException) as e:
-            self.logger.error("Error creating Service Function, task=%s: %s", task_id, str(e))
-            error_text = self._format_vim_error_msg(str(e))
-            task["error_msg"] = error_text
-            task["status"] = "FAILED"
-            task["vim_id"] = None
-            instance_element_update = {"status": "VIM_ERROR", "vim_sf_id": None, "error_msg": error_text}
-            return instance_element_update
-
-    def del_sf(self, task):
-        sf_vim_id = task["vim_id"]
-        try:
-            self.vim.delete_sf(sf_vim_id)
-            task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
-            task["error_msg"] = None
-            return None
-
-        except vimconn.vimconnException as e:
-            task["error_msg"] = self._format_vim_error_msg(str(e))
-            if isinstance(e, vimconn.vimconnNotFoundException):
-                # If not found mark as Done and fill error_msg
-                task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
-                return None
-            task["status"] = "FAILED"
-            return None
-
-    def new_classification(self, task):
-        vim_classification_id = None
-        try:
-            params = task["params"]
-            task_id = task["instance_action_id"] + "." + str(task["task_index"])
-            dep_id = "TASK-" + str(task["extra"]["depends_on"][0])
-            error_text = ""
-            interfaces = task.get("depends").get(dep_id).get("extra").get("interfaces").keys()
-            # Bear in mind that different VIM connectors might support Classifications differently.
-            # In the case of OpenStack, only the first VNF attached to the classifier will be used
-            # to create the Classification(s) (the "logical source port" of the "Flow Classifier").
-            # Since the VNFFG classifier match lacks the ethertype, classification defaults to
-            # using the IPv4 flow classifier.
-            name = "c-%s" % task["item_id"][:8]
-            # if not CIDR is given for the IP addresses, add /32:
-            ip_proto = int(params.get("ip_proto"))
-            source_ip = params.get("source_ip")
-            destination_ip = params.get("destination_ip")
-            source_port = params.get("source_port")
-            destination_port = params.get("destination_port")
-            definition = {"logical_source_port": interfaces[0]}
-            if ip_proto:
-                if ip_proto == 1:
-                    ip_proto = 'icmp'
-                elif ip_proto == 6:
-                    ip_proto = 'tcp'
-                elif ip_proto == 17:
-                    ip_proto = 'udp'
-                definition["protocol"] = ip_proto
-            if source_ip:
-                if '/' not in source_ip:
-                    source_ip += '/32'
-                definition["source_ip_prefix"] = source_ip
-            if source_port:
-                definition["source_port_range_min"] = source_port
-                definition["source_port_range_max"] = source_port
-            if destination_port:
-                definition["destination_port_range_min"] = destination_port
-                definition["destination_port_range_max"] = destination_port
-            if destination_ip:
-                if '/' not in destination_ip:
-                    destination_ip += '/32'
-                definition["destination_ip_prefix"] = destination_ip
-
-            vim_classification_id = self.vim.new_classification(
-                name, 'legacy_flow_classifier', definition)
-
-            task["extra"]["created"] = True
-            task["extra"]["vim_status"] = "ACTIVE"
-            task["error_msg"] = None
-            task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
-            task["vim_id"] = vim_classification_id
-            instance_element_update = {"status": "ACTIVE", "vim_classification_id": vim_classification_id,
-                                       "error_msg": None}
-            return instance_element_update
-
-        except (vimconn.vimconnException, VimThreadException) as e:
-            self.logger.error("Error creating Classification, task=%s: %s", task_id, str(e))
-            error_text = self._format_vim_error_msg(str(e))
-            task["error_msg"] = error_text
-            task["status"] = "FAILED"
-            task["vim_id"] = None
-            instance_element_update = {"status": "VIM_ERROR", "vim_classification_id": None, "error_msg": error_text}
-            return instance_element_update
-
-    def del_classification(self, task):
-        classification_vim_id = task["vim_id"]
-        try:
-            self.vim.delete_classification(classification_vim_id)
-            task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
-            task["error_msg"] = None
-            return None
-
-        except vimconn.vimconnException as e:
-            task["error_msg"] = self._format_vim_error_msg(str(e))
-            if isinstance(e, vimconn.vimconnNotFoundException):
-                # If not found mark as Done and fill error_msg
-                task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
-                return None
-            task["status"] = "FAILED"
-            return None
-
-    def new_sfp(self, task):
-        vim_sfp_id = None
-        try:
-            task_id = task["instance_action_id"] + "." + str(task["task_index"])
-            depending_tasks = [task.get("depends").get("TASK-" + str(tsk_id)) for tsk_id in
-                               task.get("extra").get("depends_on")]
-            error_text = ""
-            sf_id_list = []
-            classification_id_list = []
-            for dep in depending_tasks:
-                vim_id = dep.get("vim_id")
-                resource = dep.get("item")
-                if resource == "instance_sfs":
-                    sf_id_list.append(vim_id)
-                elif resource == "instance_classifications":
-                    classification_id_list.append(vim_id)
-
-            name = "sfp-%s" % task["item_id"][:8]
-            # By default no form of IETF SFC Encapsulation will be used
-            vim_sfp_id = self.vim.new_sfp(name, classification_id_list, sf_id_list, sfc_encap=False)
-
-            task["extra"]["created"] = True
-            task["extra"]["vim_status"] = "ACTIVE"
-            task["error_msg"] = None
-            task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
-            task["vim_id"] = vim_sfp_id
-            instance_element_update = {"status": "ACTIVE", "vim_sfp_id": vim_sfp_id, "error_msg": None}
-            return instance_element_update
-
-        except (vimconn.vimconnException, VimThreadException) as e:
-            self.logger.error("Error creating Service Function, task=%s: %s", task_id, str(e))
-            error_text = self._format_vim_error_msg(str(e))
-            task["error_msg"] = error_text
-            task["status"] = "FAILED"
-            task["vim_id"] = None
-            instance_element_update = {"status": "VIM_ERROR", "vim_sfp_id": None, "error_msg": error_text}
-            return instance_element_update
-
-    def del_sfp(self, task):
-        sfp_vim_id = task["vim_id"]
-        try:
-            self.vim.delete_sfp(sfp_vim_id)
-            task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
-            task["error_msg"] = None
-            return None
-
-        except vimconn.vimconnException as e:
-            task["error_msg"] = self._format_vim_error_msg(str(e))
-            if isinstance(e, vimconn.vimconnNotFoundException):
-                # If not found mark as Done and fill error_msg
-                task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
-                return None
-            task["status"] = "FAILED"
-            return None
diff --git a/osm_ro/vimconn.py b/osm_ro/vimconn.py
deleted file mode 100644 (file)
index 957c410..0000000
+++ /dev/null
@@ -1,942 +0,0 @@
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-"""
-vimconn implement an Abstract class for the vim connector plugins
- with the definition of the method to be implemented.
-"""
-
-import logging
-import paramiko
-import socket
-import StringIO
-import yaml
-import sys
-from email.mime.multipart import MIMEMultipart
-from email.mime.text import MIMEText
-from utils import deprecated
-
-__author__ = "Alfonso Tierno, Igor D.C."
-__date__  = "$14-aug-2017 23:59:59$"
-
# Error variables: HTTP status codes reused as the error codes carried by vimconnException
HTTP_Bad_Request = 400
HTTP_Unauthorized = 401
HTTP_Not_Found = 404
HTTP_Method_Not_Allowed = 405
HTTP_Request_Timeout = 408
HTTP_Conflict = 409
HTTP_Not_Implemented = 501
HTTP_Service_Unavailable = 503
HTTP_Internal_Server_Error = 500
-
-
class vimconnException(Exception):
    """Common and base class Exception for all vimconnector exceptions.

    Carries an HTTP-like status code (http_code) so the API layer can map it to a response.
    """
    def __init__(self, message, http_code=HTTP_Bad_Request):
        Exception.__init__(self, message)
        self.http_code = http_code  # HTTP-like status code exposed to the caller
-
-
class vimconnConnectionException(vimconnException):
    """Connectivity error with the VIM (cannot reach it)."""
    def __init__(self, message, http_code=HTTP_Service_Unavailable):
        vimconnException.__init__(self, message, http_code)
-
-
class vimconnUnexpectedResponse(vimconnException):
    """An unexpected or malformed response was received from the VIM."""
    def __init__(self, message, http_code=HTTP_Service_Unavailable):
        vimconnException.__init__(self, message, http_code)
-
-
class vimconnAuthException(vimconnException):
    """Invalid credentials or authorization to perform this action over the VIM."""
    def __init__(self, message, http_code=HTTP_Unauthorized):
        vimconnException.__init__(self, message, http_code)
-
-
class vimconnNotFoundException(vimconnException):
    """The requested item is not found at the VIM."""
    def __init__(self, message, http_code=HTTP_Not_Found):
        vimconnException.__init__(self, message, http_code)
-
-
class vimconnConflictException(vimconnException):
    """There is a conflict, e.g. more than one item found where one was expected."""
    def __init__(self, message, http_code=HTTP_Conflict):
        vimconnException.__init__(self, message, http_code)
-
-
class vimconnNotSupportedException(vimconnException):
    """The request is not supported by this connector."""
    def __init__(self, message, http_code=HTTP_Service_Unavailable):
        vimconnException.__init__(self, message, http_code)
-
-
class vimconnNotImplemented(vimconnException):
    """The method is not implemented by the connector (abstract-method placeholder)."""
    def __init__(self, message, http_code=HTTP_Not_Implemented):
        vimconnException.__init__(self, message, http_code)
-
-
-class vimconnector():
-    """Abstract base class for all the VIM connector plugins
-    These plugins must implement a vimconnector class derived from this 
-    and all these privated methods
-    """ 
-    def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None, log_level=None,
-                 config={}, persitent_info={}):
-        """
-        Constructor of VIM. Raise an exception is some needed parameter is missing, but it must not do any connectivity
-            checking against the VIM
-        :param uuid: internal id of this VIM
-        :param name: name assigned to this VIM, can be used for logging
-        :param tenant_id: 'tenant_id': (only one of them is mandatory) VIM tenant to be used
-        :param tenant_name: 'tenant_name': (only one of them is mandatory) VIM tenant to be used
-        :param url: url used for normal operations
-        :param url_admin: (optional), url used for administrative tasks
-        :param user: user to access
-        :param passwd: password
-        :param log_level: provided if it should use a different log_level than the general one
-        :param config: dictionary with extra VIM information. This contains a consolidate version of VIM config
-                    at VIM_ACCOUNT (attach)
-        :param persitent_info: dict where the class can store information that will be available among class
-                    destroy/creation cycles. This info is unique per VIM/credential. At first call it will contain an
-                    empty dict. Useful to store login/tokens information for speed up communication
-
-        """
-        self.id = uuid
-        self.name = name
-        self.url = url
-        self.url_admin = url_admin
-        self.tenant_id = tenant_id
-        self.tenant_name = tenant_name
-        self.user = user
-        self.passwd = passwd
-        self.config = config or {}
-        self.availability_zone = None
-        self.logger = logging.getLogger('openmano.vim')
-        if log_level:
-            self.logger.setLevel(getattr(logging, log_level))
-        if not self.url_admin:   # try to use normal url
-            self.url_admin = self.url
-    
-    def __getitem__(self, index):
-        if index == 'tenant_id':
-            return self.tenant_id
-        if index == 'tenant_name':
-            return self.tenant_name
-        elif index == 'id':
-            return self.id
-        elif index == 'name':
-            return self.name
-        elif index == 'user':
-            return self.user
-        elif index == 'passwd':
-            return self.passwd
-        elif index == 'url':
-            return self.url
-        elif index == 'url_admin':
-            return self.url_admin
-        elif index == "config":
-            return self.config
-        else:
-            raise KeyError("Invalid key '{}'".format(index))
-        
-    def __setitem__(self, index, value):
-        if index == 'tenant_id':
-            self.tenant_id = value
-        if index == 'tenant_name':
-            self.tenant_name = value
-        elif index == 'id':
-            self.id = value
-        elif index == 'name':
-            self.name = value
-        elif index == 'user':
-            self.user = value
-        elif index == 'passwd':
-            self.passwd = value
-        elif index == 'url':
-            self.url = value
-        elif index == 'url_admin':
-            self.url_admin = value
-        else:
-            raise KeyError("Invalid key '{}'".format(index))
-
-    @staticmethod
-    def _create_mimemultipart(content_list):
-        """Creates a MIMEmultipart text combining the content_list
-        :param content_list: list of text scripts to be combined
-        :return: str of the created MIMEmultipart. If the list is empty returns None, if the list contains only one
-        element MIMEmultipart is not created and this content is returned
-        """
-        if not content_list:
-            return None
-        elif len(content_list) == 1:
-            return content_list[0]
-        combined_message = MIMEMultipart()
-        for content in content_list:
-            if content.startswith('#include'):
-                mime_format = 'text/x-include-url'
-            elif content.startswith('#include-once'):
-                mime_format = 'text/x-include-once-url'
-            elif content.startswith('#!'):
-                mime_format = 'text/x-shellscript'
-            elif content.startswith('#cloud-config'):
-                mime_format = 'text/cloud-config'
-            elif content.startswith('#cloud-config-archive'):
-                mime_format = 'text/cloud-config-archive'
-            elif content.startswith('#upstart-job'):
-                mime_format = 'text/upstart-job'
-            elif content.startswith('#part-handler'):
-                mime_format = 'text/part-handler'
-            elif content.startswith('#cloud-boothook'):
-                mime_format = 'text/cloud-boothook'
-            else:  # by default
-                mime_format = 'text/x-shellscript'
-            sub_message = MIMEText(content, mime_format, sys.getdefaultencoding())
-            combined_message.attach(sub_message)
-        return combined_message.as_string()
-
    def _create_user_data(self, cloud_config):
        """
        Creates a script user database on cloud_config info
        :param cloud_config: dictionary with
            'key-pairs': (optional) list of strings with the public key to be inserted to the default user
            'users': (optional) list of users to be inserted, each item is a dict with:
                'name': (mandatory) user name,
                'key-pairs': (optional) list of strings with the public key to be inserted to the user
            'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
                or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
            'config-files': (optional). List of files to be transferred. Each item is a dict with:
                'dest': (mandatory) string with the destination absolute path
                'encoding': (optional, by default text). Can be one of:
                    'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
                'content' (mandatory): string with the content of the file
                'permissions': (optional) string with file permissions, typically octal notation '0644'
                'owner': (optional) file owner, string with the format 'owner:group'
            'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
        :return: config_drive, userdata. The first is a boolean or None, the second a string or None
        """
        config_drive = None
        userdata = None
        userdata_list = []
        if isinstance(cloud_config, dict):
            # raw user-data scripts are appended verbatim (single string or list of strings)
            if cloud_config.get("user-data"):
                if isinstance(cloud_config["user-data"], str):
                    userdata_list.append(cloud_config["user-data"])
                else:
                    for u in cloud_config["user-data"]:
                        userdata_list.append(u)
            if cloud_config.get("boot-data-drive") is not None:
                config_drive = cloud_config["boot-data-drive"]
            # build a '#cloud-config' document only when there is something to put in it
            if cloud_config.get("config-files") or cloud_config.get("users") or cloud_config.get("key-pairs"):
                userdata_dict = {}
                # default user
                if cloud_config.get("key-pairs"):
                    userdata_dict["ssh-authorized-keys"] = cloud_config["key-pairs"]
                    userdata_dict["users"] = [{"default": None, "ssh-authorized-keys": cloud_config["key-pairs"]}]
                if cloud_config.get("users"):
                    if "users" not in userdata_dict:
                        userdata_dict["users"] = ["default"]
                    for user in cloud_config["users"]:
                        # every extra user is granted passwordless sudo
                        user_info = {
                            "name": user["name"],
                            "sudo": "ALL = (ALL)NOPASSWD:ALL"
                        }
                        if "user-info" in user:
                            user_info["gecos"] = user["user-info"]
                        if user.get("key-pairs"):
                            user_info["ssh-authorized-keys"] = user["key-pairs"]
                        userdata_dict["users"].append(user_info)

                if cloud_config.get("config-files"):
                    userdata_dict["write_files"] = []
                    # NOTE: 'file' shadows the builtin name; kept unchanged for byte-compatibility
                    for file in cloud_config["config-files"]:
                        file_info = {
                            "path": file["dest"],
                            "content": file["content"]
                        }
                        if file.get("encoding"):
                            file_info["encoding"] = file["encoding"]
                        if file.get("permissions"):
                            file_info["permissions"] = file["permissions"]
                        if file.get("owner"):
                            file_info["owner"] = file["owner"]
                        userdata_dict["write_files"].append(file_info)
                userdata_list.append("#cloud-config\n" + yaml.safe_dump(userdata_dict, indent=4,
                                                                        default_flow_style=False))
            # combine all pieces into a single MIME multipart (or pass-through for one piece)
            userdata = self._create_mimemultipart(userdata_list)
            self.logger.debug("userdata: %s", userdata)
        elif isinstance(cloud_config, str):
            # a plain string is passed to cloud-init as-is
            userdata = cloud_config
        return config_drive, userdata
-
    def check_vim_connectivity(self):
        """Checks VIM can be reached and user credentials are ok.
        Returns None if success or raises vimconnConnectionException, vimconnAuthException, ...
        """
        # by default no checking until each connector implements it
        return None
-
    def new_tenant(self, tenant_name, tenant_description):
        """Adds a new tenant to VIM with this name and description, this is done using admin_url if provided
        "tenant_name": string max length 64
        "tenant_description": string max length 256
        returns the tenant identifier or raise exception
        """
        raise vimconnNotImplemented("Should have implemented this")
-
    def delete_tenant(self, tenant_id):
        """Delete a tenant from VIM
        tenant_id: returned VIM tenant_id on "new_tenant"
        Returns None on success. Raises an exception on failure. If tenant is not found raises vimconnNotFoundException
        """
        raise vimconnNotImplemented("Should have implemented this")
-
    def get_tenant_list(self, filter_dict={}):
        """Obtain tenants of VIM
        filter_dict dictionary that can contain the following keys:
            name: filter by tenant name
            id: filter by tenant uuid/id
            <other VIM specific>
        Returns the tenant list of dictionaries, and empty list if no tenant match all the filters:
            [{'name':'<name>, 'id':'<id>, ...}, ...]
        """
        # NOTE(review): mutable default argument; harmless here (never mutated) but overriders should use None
        raise vimconnNotImplemented("Should have implemented this")
-
    def new_network(self, net_name, net_type, ip_profile=None, shared=False, vlan=None):
        """Adds a tenant network to VIM
        Params:
            'net_name': name of the network
            'net_type': one of:
                'bridge': overlay isolated network
                'data':   underlay E-LAN network for Passthrough and SRIOV interfaces
                'ptp':    underlay E-LINE network for Passthrough and SRIOV interfaces.
            'ip_profile': is a dict containing the IP parameters of the network
                'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
                'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
                'gateway_address': (Optional) ip_schema, that is X.X.X.X
                'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
                'dhcp_enabled': True or False
                'dhcp_start_address': ip_schema, first IP to grant
                'dhcp_count': number of IPs to grant.
            'shared': if this network can be seen/used by other tenants/organizations
            'vlan': in case of a data or ptp net_type, the intended vlan tag to be used for the network
        Returns a tuple with the network identifier and created_items, or raises an exception on error
            created_items can be None or a dictionary where this method can include key-values that will be passed to
            the method delete_network. Can be used to store created segments, created l2gw connections, etc.
            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
            as not present.
        """
        raise vimconnNotImplemented("Should have implemented this")
-
    def get_network_list(self, filter_dict={}):
        """Obtain tenant networks of VIM
        Params:
            'filter_dict' (optional) contains entries to return only networks that matches ALL entries:
                name: string  => returns only networks with this name
                id:   string  => returns networks with this VIM id, this imply returns one network at most
                shared: boolean >= returns only networks that are (or are not) shared
                tenant_id: string => returns only networks that belong to this tenant/project
                ,#(not used yet) admin_state_up: boolean => returns only networks that are (or are not) in admin state active
                #(not used yet) status: 'ACTIVE','ERROR',... => filter networks that are on this status
        Returns the network list of dictionaries. each dictionary contains:
            'id': (mandatory) VIM network id
            'name': (mandatory) VIM network name
            'status': (mandatory) can be 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
            'network_type': (optional) can be 'vxlan', 'vlan' or 'flat'
            'segmentation_id': (optional) in case network_type is vlan or vxlan this field contains the segmentation id
            'error_msg': (optional) text that explains the ERROR status
            other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
        List can be empty if no network map the filter_dict. Raise an exception only upon VIM connectivity,
            authorization, or some other unspecific error
        """
        # NOTE(review): mutable default argument; harmless here (never mutated) but overriders should use None
        raise vimconnNotImplemented("Should have implemented this")
-
    def get_network(self, net_id):
        """Obtain network details from the 'net_id' VIM network
        Return a dict that contains:
            'id': (mandatory) VIM network id, that is, net_id
            'name': (mandatory) VIM network name
            'status': (mandatory) can be 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
            'error_msg': (optional) text that explains the ERROR status
            other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
        Raises an exception upon error or when network is not found
        """
        raise vimconnNotImplemented("Should have implemented this")
-
    def delete_network(self, net_id, created_items=None):
        """
        Removes a tenant network from VIM and its associated elements
        :param net_id: VIM identifier of the network, provided by method new_network
        :param created_items: dictionary with extra items to be deleted. provided by method new_network
        Returns the network identifier or raises an exception upon error or when network is not found
        """
        raise vimconnNotImplemented("Should have implemented this")
-
    def refresh_nets_status(self, net_list):
        """Get the status of the networks
        Params:
            'net_list': a list with the VIM network id to be get the status
        Returns a dictionary with:
            'net_id':         #VIM id of this network
                status:     #Mandatory. Text with one of:
                    #  DELETED (not found at vim)
                    #  VIM_ERROR (Cannot connect to VIM, authentication problems, VIM response error, ...)
                    #  OTHER (Vim reported other status not understood)
                    #  ERROR (VIM indicates an ERROR status)
                    #  ACTIVE, INACTIVE, DOWN (admin down),
                    #  BUILD (on building process)
                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
            'net_id2': ...
        """
        raise vimconnNotImplemented("Should have implemented this")
-
-    def get_flavor(self, flavor_id):
-        """Obtain flavor details from the VIM
-        Returns the flavor dict details {'id':<>, 'name':<>, other vim specific }
-        Raises an exception upon error or if not found
-        """
-        raise vimconnNotImplemented("Should have implemented this")
-
-    def get_flavor_id_from_data(self, flavor_dict):
-        """Obtain flavor id that match the flavor description
-        Params:
-            'flavor_dict': dictionary that contains:
-                'disk': main hard disk in GB
-                'ram': meomry in MB
-                'vcpus': number of virtual cpus
-                #TODO: complete parameters for EPA
-        Returns the flavor_id or raises a vimconnNotFoundException
-        """
-        raise vimconnNotImplemented("Should have implemented this")
-
-    def new_flavor(self, flavor_data):
-        """Adds a tenant flavor to VIM
-            flavor_data contains a dictionary with information, keys:
-                name: flavor name
-                ram: memory (cloud type) in MBytes
-                vpcus: cpus (cloud type)
-                extended: EPA parameters
-                  - numas: #items requested in same NUMA
-                        memory: number of 1G huge pages memory
-                        paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads
-                        interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
-                          - name: interface name
-                            dedicated: yes|no|yes:sriov;  for PT, SRIOV or only one SRIOV for the physical NIC
-                            bandwidth: X Gbps; requested guarantee bandwidth
-                            vpci: requested virtual PCI address   
-                disk: disk size
-                is_public:
-                 #TODO to concrete
-        Returns the flavor identifier"""
-        raise vimconnNotImplemented("Should have implemented this")
-
-    def delete_flavor(self, flavor_id):
-        """Deletes a tenant flavor from VIM identify by its id
-        Returns the used id or raise an exception"""
-        raise vimconnNotImplemented("Should have implemented this")
-
-    def new_image(self, image_dict):
-        """ Adds a tenant image to VIM
-        Returns the image id or raises an exception if failed
-        """
-        raise vimconnNotImplemented("Should have implemented this")
-
-    def delete_image(self, image_id):
-        """Deletes a tenant image from VIM
-        Returns the image_id if image is deleted or raises an exception on error"""
-        raise vimconnNotImplemented("Should have implemented this")
-
-    def get_image_id_from_path(self, path):
-        """Get the image id from image path in the VIM database.
-           Returns the image_id or raises a vimconnNotFoundException
-        """
-        raise vimconnNotImplemented("Should have implemented this")
-        
-    def get_image_list(self, filter_dict={}):
-        """Obtain tenant images from VIM
-        Filter_dict can be:
-            name: image name
-            id: image uuid
-            checksum: image checksum
-            location: image path
-        Returns the image list of dictionaries:
-            [{<the fields at Filter_dict plus some VIM specific>}, ...]
-            List can be empty
-        """
-        raise vimconnNotImplemented( "Should have implemented this" )
-
-    def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None, disk_list=None,
-        availability_zone_index=None, availability_zone_list=None):
-        """Adds a VM instance to VIM
-        Params:
-            'start': (boolean) indicates if VM must start or created in pause mode.
-            'image_id','flavor_id': image and flavor VIM id to use for the VM
-            'net_list': list of interfaces, each one is a dictionary with:
-                'name': (optional) name for the interface.
-                'net_id': VIM network id where this interface must be connect to. Mandatory for type==virtual
-                'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities
-                'model': (optional and only have sense for type==virtual) interface model: virtio, e1000, ...
-                'mac_address': (optional) mac address to assign to this interface
-                'ip_address': (optional) IP address to assign to this interface
-                #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type if VF and net_id is not provided,
-                    the VLAN tag to be used. In case net_id is provided, the internal network vlan is used for tagging VF
-                'type': (mandatory) can be one of:
-                    'virtual', in this case always connected to a network of type 'net_type=bridge'
-                     'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp network ot it
-                           can created unconnected
-                     'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
-                     'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
-                            are allocated on the same physical NIC
-                'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
-                'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing
-                                or True, it must apply the default VIM behaviour
-                After execution the method will add the key:
-                'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this
-                        interface. 'net_list' is modified
-            'cloud_config': (optional) dictionary with:
-                'key-pairs': (optional) list of strings with the public key to be inserted to the default user
-                'users': (optional) list of users to be inserted, each item is a dict with:
-                    'name': (mandatory) user name,
-                    'key-pairs': (optional) list of strings with the public key to be inserted to the user
-                'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
-                    or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
-                'config-files': (optional). List of files to be transferred. Each item is a dict with:
-                    'dest': (mandatory) string with the destination absolute path
-                    'encoding': (optional, by default text). Can be one of:
-                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
-                    'content' (mandatory): string with the content of the file
-                    'permissions': (optional) string with file permissions, typically octal notation '0644'
-                    'owner': (optional) file owner, string with the format 'owner:group'
-                'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
-            'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
-                'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
-                'size': (mandatory) string with the size of the disk in GB
-            availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
-            availability_zone_list: list of availability zones given by user in the VNFD descriptor.  Ignore if
-                availability_zone_index is None
-        Returns a tuple with the instance identifier and created_items or raises an exception on error
-            created_items can be None or a dictionary where this method can include key-values that will be passed to
-            the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
-            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
-            as not present.
-        """
-        raise vimconnNotImplemented( "Should have implemented this" )
-        
-    def get_vminstance(self,vm_id):
-        """Returns the VM instance information from VIM"""
-        raise vimconnNotImplemented( "Should have implemented this" )
-        
-    def delete_vminstance(self, vm_id, created_items=None):
-        """
-        Removes a VM instance from VIM and its associated elements
-        :param vm_id: VIM identifier of the VM, provided by method new_vminstance
-        :param created_items: dictionary with extra items to be deleted. provided by method new_vminstance and/or method
-            action_vminstance
-        :return: None or the same vm_id. Raises an exception on fail
-        """
-        raise vimconnNotImplemented( "Should have implemented this" )
-
-    def refresh_vms_status(self, vm_list):
-        """Get the status of the virtual machines and their interfaces/ports
-           Params: the list of VM identifiers
-           Returns a dictionary with:
-                vm_id:          #VIM id of this Virtual Machine
-                    status:     #Mandatory. Text with one of:
-                                #  DELETED (not found at vim)
-                                #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...) 
-                                #  OTHER (Vim reported other status not understood)
-                                #  ERROR (VIM indicates an ERROR status)
-                                #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running), 
-                                #  BUILD (on building process), ERROR
-                                #  ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
-                                #
-                    error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR 
-                    vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
-                    interfaces: list with interface info. Each item a dictionary with:
-                        vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
-                        mac_address:      #Text format XX:XX:XX:XX:XX:XX
-                        vim_net_id:       #network id where this interface is connected, if provided at creation
-                        vim_interface_id: #interface/port VIM id
-                        ip_address:       #null, or text with IPv4, IPv6 address
-                        compute_node:     #identification of compute node where PF,VF interface is allocated
-                        pci:              #PCI address of the NIC that hosts the PF,VF
-                        vlan:             #physical VLAN used for VF
-        """
-        raise vimconnNotImplemented( "Should have implemented this" )
-    
-    def action_vminstance(self, vm_id, action_dict, created_items={}):
-        """
-        Send and action over a VM instance. Returns created_items if the action was successfully sent to the VIM.
-        created_items is a dictionary with items that
-        :param vm_id: VIM identifier of the VM, provided by method new_vminstance
-        :param action_dict: dictionary with the action to perform
-        :param created_items: provided by method new_vminstance is a dictionary with key-values that will be passed to
-            the method delete_vminstance. Can be used to store created ports, volumes, etc. Format is vimconnector
-            dependent, but do not use nested dictionaries and a value of None should be the same as not present. This
-            method can modify this value
-        :return: None, or a console dict
-        """
-        raise vimconnNotImplemented( "Should have implemented this" )
-    
-    def get_vminstance_console(self, vm_id, console_type="vnc"):
-        """
-        Get a console for the virtual machine
-        Params:
-            vm_id: uuid of the VM
-            console_type, can be:
-                "novnc" (by default), "xvpvnc" for VNC types, 
-                "rdp-html5" for RDP types, "spice-html5" for SPICE types
-        Returns dict with the console parameters:
-                protocol: ssh, ftp, http, https, ...
-                server:   usually ip address 
-                port:     the http, ssh, ... port 
-                suffix:   extra text, e.g. the http path and query string   
-        """
-        raise vimconnNotImplemented( "Should have implemented this" )
-
-    def inject_user_key(self, ip_addr=None, user=None, key=None, ro_key=None, password=None):
-        """
-        Inject a ssh public key in a VM
-        Params:
-            ip_addr: ip address of the VM
-            user: username (default-user) to enter in the VM
-            key: public key to be injected in the VM
-            ro_key: private key of the RO, used to enter in the VM if the password is not provided
-            password: password of the user to enter in the VM
-        The function doesn't return a value:
-        """
-        if not ip_addr or not user:
-            raise vimconnNotSupportedException("All parameters should be different from 'None'")
-        elif not ro_key and not password:
-            raise vimconnNotSupportedException("All parameters should be different from 'None'")
-        else:
-            commands = {'mkdir -p ~/.ssh/', 'echo "%s" >> ~/.ssh/authorized_keys' % key,
-                        'chmod 644 ~/.ssh/authorized_keys', 'chmod 700 ~/.ssh/'}
-            client = paramiko.SSHClient()
-            try:
-                if ro_key:
-                    pkey = paramiko.RSAKey.from_private_key(StringIO.StringIO(ro_key))
-                else:
-                    pkey = None
-                client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
-                client.connect(ip_addr, username=user, password=password, pkey=pkey, timeout=10)
-                for command in commands:
-                    (i, o, e) = client.exec_command(command, timeout=10)
-                    returncode = o.channel.recv_exit_status()
-                    output = o.read()
-                    outerror = e.read()
-                    if returncode != 0:
-                        text = "run_command='{}' Error='{}'".format(command, outerror)
-                        raise vimconnUnexpectedResponse("Cannot inject ssh key in VM: '{}'".format(text))
-                        return
-            except (socket.error, paramiko.AuthenticationException, paramiko.SSHException) as message:
-                raise vimconnUnexpectedResponse(
-                    "Cannot inject ssh key in VM: '{}' - {}".format(ip_addr, str(message)))
-                return
-
-# Optional methods
-
-    def new_tenant(self,tenant_name,tenant_description):
-        """Adds a new tenant to VIM with this name and description, this is done using admin_url if provided
-        "tenant_name": string max lenght 64
-        "tenant_description": string max length 256
-        returns the tenant identifier or raise exception
-        """
-        raise vimconnNotImplemented( "Should have implemented this" )
-
-    def delete_tenant(self,tenant_id,):
-        """Delete a tenant from VIM
-        tenant_id: returned VIM tenant_id on "new_tenant"
-        Returns None on success. Raises and exception of failure. If tenant is not found raises vimconnNotFoundException
-        """
-        raise vimconnNotImplemented( "Should have implemented this" )
-
-    def get_tenant_list(self, filter_dict=None):
-        """Obtain tenants of VIM
-        filter_dict dictionary that can contain the following keys:
-            name: filter by tenant name
-            id: filter by tenant uuid/id
-            <other VIM specific>
-        Returns the tenant list of dictionaries, and empty list if no tenant match all the filers:
-            [{'name':'<name>, 'id':'<id>, ...}, ...]
-        """
-        raise vimconnNotImplemented( "Should have implemented this" )
-
-    def new_classification(self, name, ctype, definition):
-        """Creates a traffic classification in the VIM
-        Params:
-            'name': name of this classification
-            'ctype': type of this classification
-            'definition': definition of this classification (type-dependent free-form text)
-        Returns the VIM's classification ID on success or raises an exception on failure
-        """
-        raise vimconnNotImplemented( "SFC support not implemented" )
-
-    def get_classification(self, classification_id):
-        """Obtain classification details of the VIM's classification with ID='classification_id'
-        Return a dict that contains:
-            'id': VIM's classification ID (same as classification_id)
-            'name': VIM's classification name
-            'type': type of this classification
-            'definition': definition of the classification
-            'status': 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
-            'error_msg': (optional) text that explains the ERROR status
-            other VIM specific fields: (optional) whenever possible
-        Raises an exception upon error or when classification is not found
-        """
-        raise vimconnNotImplemented( "SFC support not implemented" )
-
-    def get_classification_list(self, filter_dict={}):
-        """Obtain classifications from the VIM
-        Params:
-            'filter_dict' (optional): contains the entries to filter the classifications on and only return those that match ALL:
-                id:   string => returns classifications with this VIM's classification ID, which implies a return of one classification at most
-                name: string => returns only classifications with this name
-                type: string => returns classifications of this type
-                definition: string => returns classifications that have this definition
-                tenant_id: string => returns only classifications that belong to this tenant/project
-        Returns a list of classification dictionaries, each dictionary contains:
-            'id': (mandatory) VIM's classification ID
-            'name': (mandatory) VIM's classification name
-            'type': type of this classification
-            'definition': definition of the classification
-            other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
-        List can be empty if no classification matches the filter_dict. Raise an exception only upon VIM connectivity,
-            authorization, or some other unspecific error
-        """
-        raise vimconnNotImplemented( "SFC support not implemented" )
-
-    def delete_classification(self, classification_id):
-        """Deletes a classification from the VIM
-        Returns the classification ID (classification_id) or raises an exception upon error or when classification is not found
-        """
-        raise vimconnNotImplemented( "SFC support not implemented" )
-
-    def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
-        """Creates a service function instance in the VIM
-        Params:
-            'name': name of this service function instance
-            'ingress_ports': set of ingress ports (VIM's port IDs)
-            'egress_ports': set of egress ports (VIM's port IDs)
-            'sfc_encap': boolean stating whether this specific instance supports IETF SFC Encapsulation
-        Returns the VIM's service function instance ID on success or raises an exception on failure
-        """
-        raise vimconnNotImplemented( "SFC support not implemented" )
-
-    def get_sfi(self, sfi_id):
-        """Obtain service function instance details of the VIM's service function instance with ID='sfi_id'
-        Return a dict that contains:
-            'id': VIM's sfi ID (same as sfi_id)
-            'name': VIM's sfi name
-            'ingress_ports': set of ingress ports (VIM's port IDs)
-            'egress_ports': set of egress ports (VIM's port IDs)
-            'status': 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
-            'error_msg': (optional) text that explains the ERROR status
-            other VIM specific fields: (optional) whenever possible
-        Raises an exception upon error or when service function instance is not found
-        """
-        raise vimconnNotImplemented( "SFC support not implemented" )
-
-    def get_sfi_list(self, filter_dict={}):
-        """Obtain service function instances from the VIM
-        Params:
-            'filter_dict' (optional): contains the entries to filter the sfis on and only return those that match ALL:
-                id:   string  => returns sfis with this VIM's sfi ID, which implies a return of one sfi at most
-                name: string  => returns only service function instances with this name
-                tenant_id: string => returns only service function instances that belong to this tenant/project
-        Returns a list of service function instance dictionaries, each dictionary contains:
-            'id': (mandatory) VIM's sfi ID
-            'name': (mandatory) VIM's sfi name
-            'ingress_ports': set of ingress ports (VIM's port IDs)
-            'egress_ports': set of egress ports (VIM's port IDs)
-            other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
-        List can be empty if no sfi matches the filter_dict. Raise an exception only upon VIM connectivity,
-            authorization, or some other unspecific error
-        """
-        raise vimconnNotImplemented( "SFC support not implemented" )
-
-    def delete_sfi(self, sfi_id):
-        """Deletes a service function instance from the VIM
-        Returns the service function instance ID (sfi_id) or raises an exception upon error or when sfi is not found
-        """
-        raise vimconnNotImplemented( "SFC support not implemented" )
-
-    def new_sf(self, name, sfis, sfc_encap=True):
-        """Creates (an abstract) service function in the VIM
-        Params:
-            'name': name of this service function
-            'sfis': set of service function instances of this (abstract) service function
-            'sfc_encap': boolean stating whether this service function supports IETF SFC Encapsulation
-        Returns the VIM's service function ID on success or raises an exception on failure
-        """
-        raise vimconnNotImplemented( "SFC support not implemented" )
-
-    def get_sf(self, sf_id):
-        """Obtain service function details of the VIM's service function with ID='sf_id'
-        Return a dict that contains:
-            'id': VIM's sf ID (same as sf_id)
-            'name': VIM's sf name
-            'sfis': VIM's sf's set of VIM's service function instance IDs
-            'sfc_encap': boolean stating whether this service function supports IETF SFC Encapsulation
-            'status': 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
-            'error_msg': (optional) text that explains the ERROR status
-            other VIM specific fields: (optional) whenever possible
-        Raises an exception upon error or when sf is not found
-        """
-
-    def get_sf_list(self, filter_dict={}):
-        """Obtain service functions from the VIM
-        Params:
-            'filter_dict' (optional): contains the entries to filter the sfs on and only return those that match ALL:
-                id:   string  => returns sfs with this VIM's sf ID, which implies a return of one sf at most
-                name: string  => returns only service functions with this name
-                tenant_id: string => returns only service functions that belong to this tenant/project
-        Returns a list of service function dictionaries, each dictionary contains:
-            'id': (mandatory) VIM's sf ID
-            'name': (mandatory) VIM's sf name
-            'sfis': VIM's sf's set of VIM's service function instance IDs
-            'sfc_encap': boolean stating whether this service function supports IETF SFC Encapsulation
-            other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
-        List can be empty if no sf matches the filter_dict. Raise an exception only upon VIM connectivity,
-            authorization, or some other unspecific error
-        """
-        raise vimconnNotImplemented( "SFC support not implemented" )
-
-    def delete_sf(self, sf_id):
-        """Deletes (an abstract) service function from the VIM
-        Returns the service function ID (sf_id) or raises an exception upon error or when sf is not found
-        """
-        raise vimconnNotImplemented( "SFC support not implemented" )
-
-    def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
-        """Creates a service function path
-        Params:
-            'name': name of this service function path
-            'classifications': set of traffic classifications that should be matched on to get into this sfp
-            'sfs': list of every service function that constitutes this path , from first to last
-            'sfc_encap': whether this is an SFC-Encapsulated chain (i.e using NSH), True by default
-            'spi': (optional) the Service Function Path identifier (SPI: Service Path Identifier) for this path
-        Returns the VIM's sfp ID on success or raises an exception on failure
-        """
-        raise vimconnNotImplemented( "SFC support not implemented" )
-
-    def get_sfp(self, sfp_id):
-        """Obtain service function path details of the VIM's sfp with ID='sfp_id'
-        Return a dict that contains:
-            'id': VIM's sfp ID (same as sfp_id)
-            'name': VIM's sfp name
-            'classifications': VIM's sfp's list of VIM's classification IDs
-            'sfs': VIM's sfp's list of VIM's service function IDs
-            'status': 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
-            'error_msg': (optional) text that explains the ERROR status
-            other VIM specific fields: (optional) whenever possible
-        Raises an exception upon error or when sfp is not found
-        """
-        raise vimconnNotImplemented( "SFC support not implemented" )
-
-    def get_sfp_list(self, filter_dict={}):
-        """Obtain service function paths from VIM
-        Params:
-            'filter_dict' (optional): contains the entries to filter the sfps on, and only return those that match ALL:
-                id:   string  => returns sfps with this VIM's sfp ID , which implies a return of one sfp at most
-                name: string  => returns only sfps with this name
-                tenant_id: string => returns only sfps that belong to this tenant/project
-        Returns a list of service function path dictionaries, each dictionary contains:
-            'id': (mandatory) VIM's sfp ID
-            'name': (mandatory) VIM's sfp name
-            'classifications': VIM's sfp's list of VIM's classification IDs
-            'sfs': VIM's sfp's list of VIM's service function IDs
-            other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
-        List can be empty if no sfp matches the filter_dict. Raise an exception only upon VIM connectivity,
-            authorization, or some other unspecific error
-        """
-        raise vimconnNotImplemented( "SFC support not implemented" )
-
-    def delete_sfp(self, sfp_id):
-        """Deletes a service function path from the VIM
-        Returns the sfp ID (sfp_id) or raises an exception upon error or when sf is not found
-        """
-        raise vimconnNotImplemented( "SFC support not implemented" )
-
-# NOT USED METHODS in current version. Deprecated
-
-    @deprecated
-    def host_vim2gui(self, host, server_dict):
-        """Transform host dictionary from VIM format to GUI format,
-        and append to the server_dict
-        """
-        raise vimconnNotImplemented( "Should have implemented this" )
-
-    @deprecated
-    def get_hosts_info(self):
-        """Get the information of deployed hosts
-        Returns the hosts content"""
-        raise vimconnNotImplemented( "Should have implemented this" )
-
-    @deprecated
-    def get_hosts(self, vim_tenant):
-        """Get the hosts and deployed instances
-        Returns the hosts content"""
-        raise vimconnNotImplemented( "Should have implemented this" )
-
-    @deprecated
-    def get_processor_rankings(self):
-        """Get the processor rankings in the VIM database"""
-        raise vimconnNotImplemented( "Should have implemented this" )
-    
-    @deprecated
-    def new_host(self, host_data):
-        """Adds a new host to VIM"""
-        """Returns status code of the VIM response"""
-        raise vimconnNotImplemented( "Should have implemented this" )
-    
-    @deprecated
-    def new_external_port(self, port_data):
-        """Adds a external port to VIM"""
-        """Returns the port identifier"""
-        raise vimconnNotImplemented( "Should have implemented this" )
-        
-    @deprecated
-    def new_external_network(self,net_name,net_type):
-        """Adds a external network to VIM (shared)"""
-        """Returns the network identifier"""
-        raise vimconnNotImplemented( "Should have implemented this" )
-    @deprecated
-
-    @deprecated
-    def connect_port_network(self, port_id, network_id, admin=False):
-        """Connects a external port to a network"""
-        """Returns status code of the VIM response"""
-        raise vimconnNotImplemented( "Should have implemented this" )
-
-    @deprecated
-    def new_vminstancefromJSON(self, vm_data):
-        """Adds a VM instance to VIM"""
-        """Returns the instance identifier"""
-        raise vimconnNotImplemented( "Should have implemented this" )
-
diff --git a/osm_ro/vimconn_aws.py b/osm_ro/vimconn_aws.py
deleted file mode 100644 (file)
index bcd8cbc..0000000
+++ /dev/null
@@ -1,807 +0,0 @@
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2017 xFlow Research Pvt. Ltd
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: saboor.ahmad@xflowresearch.com
-##
-
-'''
-AWS-connector implements all the methods to interact with AWS using the BOTO client
-'''
-
-__author__ = "Saboor Ahmad"
-__date__ = "10-Apr-2017"
-
-import vimconn
-import yaml
-import logging
-import netaddr
-import time
-
-try:
-    import boto
-    import boto.ec2
-    import boto.vpc
-except:
-    exit("Boto not avialable. Try activating your virtualenv OR `pip install boto`")
-
-
-class vimconnector(vimconn.vimconnector):
-    def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None, log_level=None,
-                 config={}, persistent_info={}):
-        """ Params: uuid - id asigned to this VIM
-                name - name assigned to this VIM, can be used for logging
-                tenant_id - ID to be used for tenant
-                tenant_name - name of tenant to be used VIM tenant to be used
-                url_admin - optional, url used for administrative tasks
-                user - credentials of the VIM user
-                passwd - credentials of the VIM user
-                log_level - if must use a different log_level than the general one
-                config - dictionary with misc VIM information
-                    region_name - name of region to deploy the instances
-                    vpc_cidr_block - default CIDR block for VPC
-                    security_groups - default security group to specify this instance
-                persistent_info - dict where the class can store information that will be available among class
-                    destroy/creation cycles. This info is unique per VIM/credential. At first call it will contain an
-                    empty dict. Useful to store login/tokens information for speed up communication
-        """
-
-        vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level,
-                                      config, persistent_info)
-
-        self.persistent_info = persistent_info
-        self.a_creds = {}
-        if user:
-            self.a_creds['aws_access_key_id'] = user
-        else:
-            raise vimconn.vimconnAuthException("Username is not specified")
-        if passwd:
-            self.a_creds['aws_secret_access_key'] = passwd
-        else:
-            raise vimconn.vimconnAuthException("Password is not specified")
-        if 'region_name' in config:
-            self.region = config.get('region_name')
-        else:
-            raise vimconn.vimconnException("AWS region_name is not specified at config")
-
-        self.vpc_data = {}
-        self.subnet_data = {}
-        self.conn = None
-        self.conn_vpc = None
-        self.account_id = None
-
-        self.vpc_id = self.get_tenant_list()[0]['id']
-        # we take VPC CIDR block if specified, otherwise we use the default CIDR
-        # block suggested by AWS while creating instance
-        self.vpc_cidr_block = '10.0.0.0/24'
-
-        if tenant_id:
-            self.vpc_id = tenant_id
-        if 'vpc_cidr_block' in config:
-            self.vpc_cidr_block = config['vpc_cidr_block']
-
-        self.security_groups = None
-        if 'security_groups' in config:
-            self.security_groups = config['security_groups']
-
-        self.key_pair = None
-        if 'key_pair' in config:
-            self.key_pair = config['key_pair']
-
-        self.flavor_info = None
-        if 'flavor_info' in config:
-            flavor_data = config.get('flavor_info')
-            if isinstance(flavor_data, str):
-                try:
-                    if flavor_data[0] == "@":  # read from a file
-                        with open(flavor_data[1:], 'r') as stream:
-                            self.flavor_info = yaml.load(stream)
-                    else:
-                        self.flavor_info = yaml.load(flavor_data)
-                except yaml.YAMLError as e:
-                    self.flavor_info = None
-                    raise vimconn.vimconnException("Bad format at file '{}': {}".format(flavor_data[1:], e))
-                except IOError as e:
-                    raise vimconn.vimconnException("Error reading file '{}': {}".format(flavor_data[1:], e))
-            elif isinstance(flavor_data, dict):
-                self.flavor_info = flavor_data
-
-        self.logger = logging.getLogger('openmano.vim.aws')
-        if log_level:
-            self.logger.setLevel(getattr(logging, log_level))
-
-    def __setitem__(self, index, value):
-        """Params: index - name of value of set
-                   value - value to set
-        """
-        if index == 'user':
-            self.a_creds['aws_access_key_id'] = value
-        elif index == 'passwd':
-            self.a_creds['aws_secret_access_key'] = value
-        elif index == 'region':
-            self.region = value
-        else:
-            vimconn.vimconnector.__setitem__(self, index, value)
-
-    def _reload_connection(self):
-        """Returns: sets boto.EC2 and boto.VPC connection to work with AWS services
-        """
-
-        try:
-            self.conn = boto.ec2.connect_to_region(self.region, aws_access_key_id=self.a_creds['aws_access_key_id'],
-                                                   aws_secret_access_key=self.a_creds['aws_secret_access_key'])
-            self.conn_vpc = boto.vpc.connect_to_region(self.region, aws_access_key_id=self.a_creds['aws_access_key_id'],
-                                                       aws_secret_access_key=self.a_creds['aws_secret_access_key'])
-            # client = boto3.client("sts", aws_access_key_id=self.a_creds['aws_access_key_id'], aws_secret_access_key=self.a_creds['aws_secret_access_key'])
-            # self.account_id = client.get_caller_identity()["Account"]
-        except Exception as e:
-            self.format_vimconn_exception(e)
-
-    def format_vimconn_exception(self, e):
-        """Params: an Exception object
-        Returns: Raises the exception 'e' passed in mehtod parameters
-        """
-
-        self.conn = None
-        self.conn_vpc = None
-        raise vimconn.vimconnConnectionException(type(e).__name__ + ": " + str(e))
-
-    def get_availability_zones_list(self):
-        """Obtain AvailabilityZones from AWS
-        """
-
-        try:
-            self._reload_connection()
-            az_list = []
-            for az in self.conn.get_all_zones():
-                az_list.append(az.name)
-            return az_list
-        except Exception as e:
-            self.format_vimconn_exception(e)
-
-    def get_tenant_list(self, filter_dict={}):
-        """Obtain tenants of VIM
-        filter_dict dictionary that can contain the following keys:
-            name: filter by tenant name
-            id: filter by tenant uuid/id
-            <other VIM specific>
-        Returns the tenant list of dictionaries, and empty list if no tenant match all the filers:
-            [{'name':'<name>, 'id':'<id>, ...}, ...]
-        """
-
-        try:
-            self._reload_connection()
-            vpc_ids = []
-            tfilters = {}
-            if filter_dict != {}:
-                if 'id' in filter_dict:
-                    vpc_ids.append(filter_dict['id'])
-                    tfilters['name'] = filter_dict['id']
-            tenants = self.conn_vpc.get_all_vpcs(vpc_ids, tfilters)
-            tenant_list = []
-            for tenant in tenants:
-                tenant_list.append({'id': str(tenant.id), 'name': str(tenant.id), 'status': str(tenant.state),
-                                    'cidr_block': str(tenant.cidr_block)})
-            return tenant_list
-        except Exception as e:
-            self.format_vimconn_exception(e)
-
-    def new_tenant(self, tenant_name, tenant_description):
-        """Adds a new tenant to VIM with this name and description, this is done using admin_url if provided
-        "tenant_name": string max lenght 64
-        "tenant_description": string max length 256
-        returns the tenant identifier or raise exception
-        """
-
-        self.logger.debug("Adding a new VPC")
-        try:
-            self._reload_connection()
-            vpc = self.conn_vpc.create_vpc(self.vpc_cidr_block)
-            self.conn_vpc.modify_vpc_attribute(vpc.id, enable_dns_support=True)
-            self.conn_vpc.modify_vpc_attribute(vpc.id, enable_dns_hostnames=True)
-
-            gateway = self.conn_vpc.create_internet_gateway()
-            self.conn_vpc.attach_internet_gateway(gateway.id, vpc.id)
-            route_table = self.conn_vpc.create_route_table(vpc.id)
-            self.conn_vpc.create_route(route_table.id, '0.0.0.0/0', gateway.id)
-
-            self.vpc_data[vpc.id] = {'gateway': gateway.id, 'route_table': route_table.id,
-                                     'subnets': self.subnet_sizes(len(self.get_availability_zones_list()),
-                                                                  self.vpc_cidr_block)}
-            return vpc.id
-        except Exception as e:
-            self.format_vimconn_exception(e)
-
-    def delete_tenant(self, tenant_id):
-        """Delete a tenant from VIM
-        tenant_id: returned VIM tenant_id on "new_tenant"
-        Returns None on success. Raises and exception of failure. If tenant is not found raises vimconnNotFoundException
-        """
-
-        self.logger.debug("Deleting specified VPC")
-        try:
-            self._reload_connection()
-            vpc = self.vpc_data.get(tenant_id)
-            if 'gateway' in vpc and 'route_table' in vpc:
-                gateway_id, route_table_id = vpc['gateway'], vpc['route_table']
-                self.conn_vpc.detach_internet_gateway(gateway_id, tenant_id)
-                self.conn_vpc.delete_vpc(tenant_id)
-                self.conn_vpc.delete_route(route_table_id, '0.0.0.0/0')
-            else:
-                self.conn_vpc.delete_vpc(tenant_id)
-        except Exception as e:
-            self.format_vimconn_exception(e)
-
-    def subnet_sizes(self, availability_zones, cidr):
-        """Calcualtes possible subnets given CIDR value of VPC
-        """
-
-        if availability_zones != 2 and availability_zones != 3:
-            self.logger.debug("Number of AZs should be 2 or 3")
-            raise vimconn.vimconnNotSupportedException("Number of AZs should be 2 or 3")
-
-        netmasks = ('255.255.252.0', '255.255.254.0', '255.255.255.0', '255.255.255.128')
-        ip = netaddr.IPNetwork(cidr)
-        mask = ip.netmask
-
-        if str(mask) not in netmasks:
-            self.logger.debug("Netmask " + str(mask) + " not found")
-            raise vimconn.vimconnNotFoundException("Netmask " + str(mask) + " not found")
-
-        if availability_zones == 2:
-            for n, netmask in enumerate(netmasks):
-                if str(mask) == netmask:
-                    subnets = list(ip.subnet(n + 24))
-        else:
-            for n, netmask in enumerate(netmasks):
-                if str(mask) == netmask:
-                    pub_net = list(ip.subnet(n + 24))
-                    pri_subs = pub_net[1:]
-                    pub_mask = pub_net[0].netmask
-            pub_split = list(ip.subnet(26)) if (str(pub_mask) == '255.255.255.0') else list(ip.subnet(27))
-            pub_subs = pub_split[:3]
-            subnets = pub_subs + pri_subs
-
-        return map(str, subnets)
-
-    def new_network(self, net_name, net_type, ip_profile=None, shared=False, vlan=None):
-        """Adds a tenant network to VIM
-        Params:
-            'net_name': name of the network
-            'net_type': one of:
-                'bridge': overlay isolated network
-                'data':   underlay E-LAN network for Passthrough and SRIOV interfaces
-                'ptp':    underlay E-LINE network for Passthrough and SRIOV interfaces.
-            'ip_profile': is a dict containing the IP parameters of the network (Currently only IPv4 is implemented)
-                'ip-version': can be one of ["IPv4","IPv6"]
-                'subnet-address': ip_prefix_schema, that is X.X.X.X/Y
-                'gateway-address': (Optional) ip_schema, that is X.X.X.X
-                'dns-address': (Optional) ip_schema,
-                'dhcp': (Optional) dict containing
-                    'enabled': {"type": "boolean"},
-                    'start-address': ip_schema, first IP to grant
-                    'count': number of IPs to grant.
-            'shared': if this network can be seen/use by other tenants/organization
-            'vlan': in case of a data or ptp net_type, the intended vlan tag to be used for the network
-        Returns a tuple with the network identifier and created_items, or raises an exception on error
-            created_items can be None or a dictionary where this method can include key-values that will be passed to
-            the method delete_network. Can be used to store created segments, created l2gw connections, etc.
-            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
-            as not present.
-        """
-
-        self.logger.debug("Adding a subnet to VPC")
-        try:
-            created_items = {}
-            self._reload_connection()
-            subnet = None
-            vpc_id = self.vpc_id
-            if self.vpc_data.get(vpc_id, None):
-                cidr_block = list(set(self.vpc_data[vpc_id]['subnets']) - set(self.get_network_details({'tenant_id': vpc_id}, detail='cidr_block')))[0]
-            else:
-                vpc = self.get_tenant_list({'id': vpc_id})[0]
-                subnet_list = self.subnet_sizes(len(self.get_availability_zones_list()), vpc['cidr_block'])
-                cidr_block = list(set(subnet_list) - set(self.get_network_details({'tenant_id': vpc['id']}, detail='cidr_block')))[0]
-            subnet = self.conn_vpc.create_subnet(vpc_id, cidr_block)
-            return subnet.id, created_items
-        except Exception as e:
-            self.format_vimconn_exception(e)
-
-    def get_network_details(self, filters, detail):
-        """Get specified details related to a subnet
-        """
-        detail_list = []
-        subnet_list = self.get_network_list(filters)
-        for net in subnet_list:
-            detail_list.append(net[detail])
-        return detail_list
-
-    def get_network_list(self, filter_dict={}):
-        """Obtain tenant networks of VIM
-        Params:
-            'filter_dict' (optional) contains entries to return only networks that matches ALL entries:
-                name: string  => returns only networks with this name
-                id:   string  => returns networks with this VIM id, this imply returns one network at most
-                shared: boolean >= returns only networks that are (or are not) shared
-                tenant_id: sting => returns only networks that belong to this tenant/project
-                ,#(not used yet) admin_state_up: boolean => returns only networks that are (or are not) in admin state active
-                #(not used yet) status: 'ACTIVE','ERROR',... => filter networks that are on this status
-        Returns the network list of dictionaries. each dictionary contains:
-            'id': (mandatory) VIM network id
-            'name': (mandatory) VIM network name
-            'status': (mandatory) can be 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
-            'error_msg': (optional) text that explains the ERROR status
-            other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
-        List can be empty if no network map the filter_dict. Raise an exception only upon VIM connectivity,
-            authorization, or some other unspecific error
-        """
-
-        self.logger.debug("Getting all subnets from VIM")
-        try:
-            self._reload_connection()
-            tfilters = {}
-            if filter_dict != {}:
-                if 'tenant_id' in filter_dict:
-                    tfilters['vpcId'] = filter_dict['tenant_id']
-            subnets = self.conn_vpc.get_all_subnets(subnet_ids=filter_dict.get('name', None), filters=tfilters)
-            net_list = []
-            for net in subnets:
-                net_list.append(
-                    {'id': str(net.id), 'name': str(net.id), 'status': str(net.state), 'vpc_id': str(net.vpc_id),
-                     'cidr_block': str(net.cidr_block), 'type': 'bridge'})
-            return net_list
-        except Exception as e:
-            self.format_vimconn_exception(e)
-
-    def get_network(self, net_id):
-        """Obtain network details from the 'net_id' VIM network
-        Return a dict that contains:
-            'id': (mandatory) VIM network id, that is, net_id
-            'name': (mandatory) VIM network name
-            'status': (mandatory) can be 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
-            'error_msg': (optional) text that explains the ERROR status
-            other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
-        Raises an exception upon error or when network is not found
-        """
-
-        self.logger.debug("Getting Subnet from VIM")
-        try:
-            self._reload_connection()
-            subnet = self.conn_vpc.get_all_subnets(net_id)[0]
-            return {'id': str(subnet.id), 'name': str(subnet.id), 'status': str(subnet.state),
-                    'vpc_id': str(subnet.vpc_id), 'cidr_block': str(subnet.cidr_block)}
-        except Exception as e:
-            self.format_vimconn_exception(e)
-
-    def delete_network(self, net_id, created_items=None):
-        """
-        Removes a tenant network from VIM and its associated elements
-        :param net_id: VIM identifier of the network, provided by method new_network
-        :param created_items: dictionary with extra items to be deleted. provided by method new_network
-        Returns the network identifier or raises an exception upon error or when network is not found
-        """
-
-        self.logger.debug("Deleting subnet from VIM")
-        try:
-            self._reload_connection()
-            self.logger.debug("DELETING NET_ID: " + str(net_id))
-            self.conn_vpc.delete_subnet(net_id)
-            return net_id
-        except Exception as e:
-            self.format_vimconn_exception(e)
-
-    def refresh_nets_status(self, net_list):
-        """Get the status of the networks
-        Params:
-            'net_list': a list with the VIM network id to be get the status
-        Returns a dictionary with:
-            'net_id':         #VIM id of this network
-                status:     #Mandatory. Text with one of:
-                    #  DELETED (not found at vim)
-                    #  VIM_ERROR (Cannot connect to VIM, authentication problems, VIM response error, ...)
-                    #  OTHER (Vim reported other status not understood)
-                    #  ERROR (VIM indicates an ERROR status)
-                    #  ACTIVE, INACTIVE, DOWN (admin down),
-                    #  BUILD (on building process)
-                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
-                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
-            'net_id2': ...
-        """
-
-        self._reload_connection()
-        try:
-            dict_entry = {}
-            for net_id in net_list:
-                subnet_dict = {}
-                subnet = None
-                try:
-                    subnet = self.conn_vpc.get_all_subnets(net_id)[0]
-                    if subnet.state == "pending":
-                        subnet_dict['status'] = "BUILD"
-                    elif subnet.state == "available":
-                        subnet_dict['status'] = 'ACTIVE'
-                    else:
-                        subnet_dict['status'] = 'ERROR'
-                    subnet_dict['error_msg'] = ''
-                except Exception as e:
-                    subnet_dict['status'] = 'DELETED'
-                    subnet_dict['error_msg'] = 'Network not found'
-                finally:
-                    try:
-                        subnet_dict['vim_info'] = yaml.safe_dump(subnet, default_flow_style=True, width=256)
-                    except yaml.YAMLError as e:
-                        subnet_dict['vim_info'] = str(subnet)
-                dict_entry[net_id] = subnet_dict
-            return dict_entry
-        except Exception as e:
-            self.format_vimconn_exception(e)
-
-    def get_flavor(self, flavor_id):
-        """Obtain flavor details from the VIM
-        Returns the flavor dict details {'id':<>, 'name':<>, other vim specific }
-        Raises an exception upon error or if not found
-        """
-
-        self.logger.debug("Getting instance type")
-        try:
-            if flavor_id in self.flavor_info:
-                return self.flavor_info[flavor_id]
-            else:
-                raise vimconn.vimconnNotFoundException("Cannot find flavor with this flavor ID/Name")
-        except Exception as e:
-            self.format_vimconn_exception(e)
-
-    def get_flavor_id_from_data(self, flavor_dict):
-        """Obtain flavor id that match the flavor description
-        Params:
-            'flavor_dict': dictionary that contains:
-                'disk': main hard disk in GB
-                'ram': memory in MB
-                'vcpus': number of virtual cpus
-                #todo: complete parameters for EPA
-        Returns the flavor_id or raises a vimconnNotFoundException
-        """
-
-        self.logger.debug("Getting flavor id from data")
-        try:
-            flavor = None
-            for key, values in self.flavor_info.iteritems():
-                if (values["ram"], values["cpus"], values["disk"]) == (
-                flavor_dict["ram"], flavor_dict["vcpus"], flavor_dict["disk"]):
-                    flavor = (key, values)
-                    break
-                elif (values["ram"], values["cpus"], values["disk"]) >= (
-                flavor_dict["ram"], flavor_dict["vcpus"], flavor_dict["disk"]):
-                    if not flavor:
-                        flavor = (key, values)
-                    else:
-                        if (flavor[1]["ram"], flavor[1]["cpus"], flavor[1]["disk"]) >= (
-                        values["ram"], values["cpus"], values["disk"]):
-                            flavor = (key, values)
-            if flavor:
-                return flavor[0]
-            raise vimconn.vimconnNotFoundException("Cannot find flavor with this flavor ID/Name")
-        except Exception as e:
-            self.format_vimconn_exception(e)
-
-    def new_image(self, image_dict):
-        """ Adds a tenant image to VIM
-        Params: image_dict
-                    name (string) - The name of the AMI. Valid only for EBS-based images.
-                    description (string) - The description of the AMI.
-                    image_location (string) - Full path to your AMI manifest in Amazon S3 storage. Only used for S3-based AMI’s.
-                    architecture (string) - The architecture of the AMI. Valid choices are: * i386 * x86_64
-                    kernel_id (string) -  The ID of the kernel with which to launch the instances
-                    root_device_name (string) - The root device name (e.g. /dev/sdh)
-                    block_device_map (boto.ec2.blockdevicemapping.BlockDeviceMapping) - A BlockDeviceMapping data structure describing the EBS volumes associated with the Image.
-                    virtualization_type (string) - The virutalization_type of the image. Valid choices are: * paravirtual * hvm
-                    sriov_net_support (string) - Advanced networking support. Valid choices are: * simple
-                    snapshot_id (string) - A snapshot ID for the snapshot to be used as root device for the image. Mutually exclusive with block_device_map, requires root_device_name
-                    delete_root_volume_on_termination (bool) - Whether to delete the root volume of the image after instance termination. Only applies when creating image from snapshot_id. Defaults to False. Note that leaving volumes behind after instance termination is not free
-        Returns: image_id - image ID of the newly created image
-        """
-
-        try:
-            self._reload_connection()
-            image_location = image_dict.get('image_location', None)
-            if image_location:
-                image_location = str(self.account_id) + str(image_location)
-
-            image_id = self.conn.register_image(image_dict.get('name', None), image_dict.get('description', None),
-                                                image_location, image_dict.get('architecture', None),
-                                                image_dict.get('kernel_id', None),
-                                                image_dict.get('root_device_name', None),
-                                                image_dict.get('block_device_map', None),
-                                                image_dict.get('virtualization_type', None),
-                                                image_dict.get('sriov_net_support', None),
-                                                image_dict.get('snapshot_id', None),
-                                                image_dict.get('delete_root_volume_on_termination', None))
-            return image_id
-        except Exception as e:
-            self.format_vimconn_exception(e)
-
-    def delete_image(self, image_id):
-        """Deletes a tenant image from VIM
-        Returns the image_id if image is deleted or raises an exception on error"""
-
-        try:
-            self._reload_connection()
-            self.conn.deregister_image(image_id)
-            return image_id
-        except Exception as e:
-            self.format_vimconn_exception(e)
-
-    def get_image_id_from_path(self, path):
-        '''
-        Params: path - location of the image
-        Returns: image_id - ID of the matching image
-        '''
-        self._reload_connection()
-        try:
-            filters = {}
-            if path:
-                tokens = path.split('/')
-                filters['owner_id'] = tokens[0]
-                filters['name'] = '/'.join(tokens[1:])
-            image = self.conn.get_all_images(filters=filters)[0]
-            return image.id
-        except Exception as e:
-            self.format_vimconn_exception(e)
-
-    def get_image_list(self, filter_dict={}):
-        """Obtain tenant images from VIM
-        Filter_dict can be:
-            name: image name
-            id: image uuid
-            checksum: image checksum
-            location: image path
-        Returns the image list of dictionaries:
-            [{<the fields at Filter_dict plus some VIM specific>}, ...]
-            List can be empty
-        """
-
-        self.logger.debug("Getting image list from VIM")
-        try:
-            self._reload_connection()
-            image_id = None
-            filters = {}
-            if 'id' in filter_dict:
-                image_id = filter_dict['id']
-            if 'name' in filter_dict:
-                filters['name'] = filter_dict['name']
-            if 'location' in filter_dict:
-                filters['location'] = filter_dict['location']
-            # filters['image_type'] = 'machine'
-            # filter_dict['owner_id'] = self.account_id
-            images = self.conn.get_all_images(image_id, filters=filters)
-            image_list = []
-            for image in images:
-                image_list.append({'id': str(image.id), 'name': str(image.name), 'status': str(image.state),
-                                   'owner': str(image.owner_id), 'location': str(image.location),
-                                   'is_public': str(image.is_public), 'architecture': str(image.architecture),
-                                   'platform': str(image.platform)})
-            return image_list
-        except Exception as e:
-            self.format_vimconn_exception(e)
-
-    def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None,
-                       disk_list=None, availability_zone_index=None, availability_zone_list=None):
-        """Create a new VM/instance in AWS
-        Params: name
-                decription
-                start: (boolean) indicates if VM must start or created in pause mode.
-                image_id - image ID in AWS
-                flavor_id - instance type ID in AWS
-                net_list
-                    name
-                    net_id - subnet_id from AWS
-                    vpci - (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities
-                    model: (optional and only have sense for type==virtual) interface model: virtio, e1000, ...
-                    mac_address: (optional) mac address to assign to this interface
-                    type: (mandatory) can be one of:
-                        virtual, in this case always connected to a network of type 'net_type=bridge'
-                        'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp network ot it
-                           can created unconnected
-                        'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
-                        VFnotShared - (SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
-                            are allocated on the same physical NIC
-                    bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
-                    port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing or True, it must apply the default VIM behaviour
-                    vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this interface. 'net_list' is modified
-                    elastic_ip - True/False to define if an elastic_ip is required
-                cloud_config': (optional) dictionary with:
-                    key-pairs': (optional) list of strings with the public key to be inserted to the default user
-                    users': (optional) list of users to be inserted, each item is a dict with:
-                        name': (mandatory) user name,
-                        key-pairs': (optional) list of strings with the public key to be inserted to the user
-                    user-data': (optional) string is a text script to be passed directly to cloud-init
-                    config-files': (optional). List of files to be transferred. Each item is a dict with:
-                        dest': (mandatory) string with the destination absolute path
-                        encoding': (optional, by default text). Can be one of:
-                            b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
-                        content' (mandatory): string with the content of the file
-                        permissions': (optional) string with file permissions, typically octal notation '0644'
-                        owner: (optional) file owner, string with the format 'owner:group'
-                    boot-data-drive: boolean to indicate if user-data must be passed using a boot drive (hard disk)
-                    security-groups:
-                        subnet_id
-                        security_group_id
-                disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
-                    image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
-                    size': (mandatory) string with the size of the disk in GB
-        Returns a tuple with the instance identifier and created_items or raises an exception on error
-            created_items can be None or a dictionary where this method can include key-values that will be passed to
-            the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
-            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
-            as not present.
-        """
-
-        self.logger.debug("Creating a new VM instance")
-        try:
-            self._reload_connection()
-            instance = None
-            _, userdata = self._create_user_data(cloud_config)
-
-            if not net_list:
-                reservation = self.conn.run_instances(
-                    image_id,
-                    key_name=self.key_pair,
-                    instance_type=flavor_id,
-                    security_groups=self.security_groups,
-                    user_data=userdata
-                )
-            else:
-                for index, subnet in enumerate(net_list):
-                    net_intr = boto.ec2.networkinterface.NetworkInterfaceSpecification(subnet_id=subnet.get('net_id'),
-                                                                                       groups=None,
-                                                                                       associate_public_ip_address=True)
-
-                    if subnet.get('elastic_ip'):
-                        eip = self.conn.allocate_address()
-                        self.conn.associate_address(allocation_id=eip.allocation_id, network_interface_id=net_intr.id)
-
-                    if index == 0:
-                        reservation = self.conn.run_instances(
-                            image_id,
-                            key_name=self.key_pair,
-                            instance_type=flavor_id,
-                            security_groups=self.security_groups,
-                            network_interfaces=boto.ec2.networkinterface.NetworkInterfaceCollection(net_intr),
-                            user_data=userdata
-                        )
-                    else:
-                        while True:
-                            try:
-                                self.conn.attach_network_interface(
-                                    network_interface_id=boto.ec2.networkinterface.NetworkInterfaceCollection(net_intr),
-                                    instance_id=instance.id, device_index=0)
-                                break
-                            except:
-                                time.sleep(10)
-                    net_list[index]['vim_id'] = reservation.instances[0].interfaces[index].id
-
-            instance = reservation.instances[0]
-            return instance.id, None
-        except Exception as e:
-            self.format_vimconn_exception(e)
-
-    def get_vminstance(self, vm_id):
-        """Returns the VM instance information from VIM"""
-
-        try:
-            self._reload_connection()
-            reservation = self.conn.get_all_instances(vm_id)
-            return reservation[0].instances[0].__dict__
-        except Exception as e:
-            self.format_vimconn_exception(e)
-
-    def delete_vminstance(self, vm_id, created_items=None):
-        """Removes a VM instance from VIM
-        Returns the instance identifier"""
-
-        try:
-            self._reload_connection()
-            self.logger.debug("DELETING VM_ID: " + str(vm_id))
-            self.conn.terminate_instances(vm_id)
-            return vm_id
-        except Exception as e:
-            self.format_vimconn_exception(e)
-
-    def refresh_vms_status(self, vm_list):
-        """ Get the status of the virtual machines and their interfaces/ports
-        Params: the list of VM identifiers
-        Returns a dictionary with:
-            vm_id:          #VIM id of this Virtual Machine
-                status:     #Mandatory. Text with one of:
-                            #  DELETED (not found at vim)
-                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
-                            #  OTHER (Vim reported other status not understood)
-                            #  ERROR (VIM indicates an ERROR status)
-                            #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
-                            #  BUILD (on building process), ERROR
-                            #  ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
-                            #
-                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
-                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
-                interfaces: list with interface info. Each item a dictionary with:
-                    vim_interface_id -  The ID of the ENI.
-                    vim_net_id - The ID of the VPC subnet.
-                    mac_address - The MAC address of the interface.
-                    ip_address - The IP address of the interface within the subnet.
-        """
-        self.logger.debug("Getting VM instance information from VIM")
-        try:
-            self._reload_connection()
-            reservation = self.conn.get_all_instances(vm_list)[0]
-            instances = {}
-            instance_dict = {}
-            for instance in reservation.instances:
-                try:
-                    if instance.state in ("pending"):
-                        instance_dict['status'] = "BUILD"
-                    elif instance.state in ("available", "running", "up"):
-                        instance_dict['status'] = 'ACTIVE'
-                    else:
-                        instance_dict['status'] = 'ERROR'
-                    instance_dict['error_msg'] = ""
-                    instance_dict['interfaces'] = []
-                    interface_dict = {}
-                    for interface in instance.interfaces:
-                        interface_dict['vim_interface_id'] = interface.id
-                        interface_dict['vim_net_id'] = interface.subnet_id
-                        interface_dict['mac_address'] = interface.mac_address
-                        if hasattr(interface, 'publicIp') and interface.publicIp != None:
-                            interface_dict['ip_address'] = interface.publicIp + ";" + interface.private_ip_address
-                        else:
-                            interface_dict['ip_address'] = interface.private_ip_address
-                        instance_dict['interfaces'].append(interface_dict)
-                except Exception as e:
-                    self.logger.error("Exception getting vm status: %s", str(e), exc_info=True)
-                    instance_dict['status'] = "DELETED"
-                    instance_dict['error_msg'] = str(e)
-                finally:
-                    try:
-                        instance_dict['vim_info'] = yaml.safe_dump(instance, default_flow_style=True, width=256)
-                    except yaml.YAMLError as e:
-                        # self.logger.error("Exception getting vm status: %s", str(e), exc_info=True)
-                        instance_dict['vim_info'] = str(instance)
-                instances[instance.id] = instance_dict
-            return instances
-        except Exception as e:
-            self.logger.error("Exception getting vm status: %s", str(e), exc_info=True)
-            self.format_vimconn_exception(e)
-
-    def action_vminstance(self, vm_id, action_dict, created_items={}):
-        """Send and action over a VM instance from VIM
-        Returns the vm_id if the action was successfully sent to the VIM"""
-
-        self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
-        try:
-            self._reload_connection()
-            if "start" in action_dict:
-                self.conn.start_instances(vm_id)
-            elif "stop" in action_dict or "stop" in action_dict:
-                self.conn.stop_instances(vm_id)
-            elif "terminate" in action_dict:
-                self.conn.terminate_instances(vm_id)
-            elif "reboot" in action_dict:
-                self.conn.reboot_instances(vm_id)
-            return None
-        except Exception as e:
-            self.format_vimconn_exception(e)
diff --git a/osm_ro/vimconn_azure.py b/osm_ro/vimconn_azure.py
deleted file mode 100755 (executable)
index 24a9878..0000000
+++ /dev/null
@@ -1,495 +0,0 @@
-# -*- coding: utf-8 -*-
-
-__author__='Sergio Gonzalez'
-__date__ ='$18-apr-2019 23:59:59$'
-
-import vimconn
-import logging
-
-from os import getenv
-from uuid import uuid4
-
-from azure.common.credentials import ServicePrincipalCredentials
-from azure.mgmt.resource import ResourceManagementClient
-from azure.mgmt.network import NetworkManagementClient
-from azure.mgmt.compute import ComputeManagementClient
-
-
-class vimconnector(vimconn.vimconnector):
-
-    def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None, log_level=None,
-                 config={}, persistent_info={}):
-
-        vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level,
-                                      config, persistent_info)
-
-        # LOGGER
-        self.logger = logging.getLogger('openmano.vim.azure')
-        if log_level:
-            logging.basicConfig()
-            self.logger.setLevel(getattr(logging, log_level))
-
-        # CREDENTIALS 
-        self.credentials = ServicePrincipalCredentials(
-            client_id=user,
-            secret=passwd,
-            tenant=(tenant_id or tenant_name)
-        )
-
-        # SUBSCRIPTION
-        if 'subscription_id' in config:
-            self.subscription_id = config.get('subscription_id')
-            self.logger.debug('Setting subscription '+str(self.subscription_id))
-        else:
-            raise vimconn.vimconnException('Subscription not specified')
-        # REGION
-        if 'region_name' in config:
-            self.region = config.get('region_name')
-        else:
-            raise vimconn.vimconnException('Azure region_name is not specified at config')
-        # RESOURCE_GROUP
-        if 'resource_group' in config:
-            self.resource_group = config.get('resource_group')
-        else:
-            raise vimconn.vimconnException('Azure resource_group is not specified at config')
-        # VNET_NAME
-        if 'vnet_name' in config:
-            self.vnet_name = config["vnet_name"]
-            
-        # public ssh key
-        self.pub_key = config.get('pub_key')
-            
-    def _reload_connection(self):
-        """
-        Sets connections to work with Azure service APIs
-        :return:
-        """
-        self.logger.debug('Reloading API Connection')
-        try:
-            self.conn = ResourceManagementClient(self.credentials, self.subscription_id)
-            self.conn_compute = ComputeManagementClient(self.credentials, self.subscription_id)
-            self.conn_vnet = NetworkManagementClient(self.credentials, self.subscription_id)
-            self._check_or_create_resource_group()
-            self._check_or_create_vnet()
-        except Exception as e:
-            self.format_vimconn_exception(e)            
-
-    def _get_resource_name_from_resource_id(self, resource_id):
-        return str(resource_id.split('/')[-1])
-
-    def _get_location_from_resource_group(self, resource_group_name):
-        return self.conn.resource_groups.get(resource_group_name).location
-        
-    def _get_resource_group_name_from_resource_id(self, resource_id):
-        return str(resource_id.split('/')[4])
-
-    def _check_subnets_for_vm(self, net_list):
-        # All subnets must belong to the same resource group and vnet
-        if len(set(self._get_resource_group_name_from_resource_id(net['id']) +
-                   self._get_resource_name_from_resource_id(net['id']) for net in net_list)) != 1:
-            raise self.format_vimconn_exception('Azure VMs can only attach to subnets in same VNET')
-
-    def format_vimconn_exception(self, e):
-        """
-        Params: an Exception object
-        :param e:
-        :return: Raises the proper vimconnException
-        """
-        self.conn = None
-        self.conn_vnet = None
-        raise vimconn.vimconnConnectionException(type(e).__name__ + ': ' + str(e))        
-
-    def _check_or_create_resource_group(self):
-        """
-        Creates a resource group in indicated region
-        :return: None
-        """
-        self.logger.debug('Creating RG {} in location {}'.format(self.resource_group, self.region))
-        self.conn.resource_groups.create_or_update(self.resource_group, {'location': self.region})
-
-    def _check_or_create_vnet(self):
-        try:
-            vnet_params = {
-                'location': self.region,
-                'address_space': {
-                    'address_prefixes': "10.0.0.0/8"
-                },
-            }
-            self.conn_vnet.virtual_networks.create_or_update(self.resource_group, self.vnet_name, vnet_params)
-        except Exception as e:
-            self.format_vimconn_exception(e)
-
-    def new_network(self, net_name, net_type, ip_profile=None, shared=False, vlan=None):
-        """
-        Adds a tenant network to VIM
-        :param net_name: name of the network
-        :param net_type:
-        :param ip_profile: is a dict containing the IP parameters of the network (Currently only IPv4 is implemented)
-                'ip-version': can be one of ['IPv4','IPv6']
-                'subnet-address': ip_prefix_schema, that is X.X.X.X/Y
-                'gateway-address': (Optional) ip_schema, that is X.X.X.X
-                'dns-address': (Optional) ip_schema,
-                'dhcp': (Optional) dict containing
-                    'enabled': {'type': 'boolean'},
-                    'start-address': ip_schema, first IP to grant
-                    'count': number of IPs to grant.
-        :param shared:
-        :param vlan:
-        :return: a tuple with the network identifier and created_items, or raises an exception on error
-            created_items can be None or a dictionary where this method can include key-values that will be passed to
-            the method delete_network. Can be used to store created segments, created l2gw connections, etc.
-            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
-            as not present.
-        """
-
-        return self._new_subnet(net_name, ip_profile)
-
-    def _new_subnet(self, net_name, ip_profile):
-        """
-        Adds a tenant network to VIM. It creates a new VNET with a single subnet
-        :param net_name:
-        :param ip_profile:
-        :return:
-        """
-        self.logger.debug('Adding a subnet to VNET '+self.vnet_name)
-        self._reload_connection()
-
-        if ip_profile is None:
-            # TODO get a non used vnet ip range /24 and allocate automatically
-            raise vimconn.vimconnException('Azure cannot create VNET with no CIDR')
-
-        try:
-            vnet_params= {
-                'location': self.region,
-                'address_space': {
-                    'address_prefixes': [ip_profile['subnet_address']]
-                },
-                'subnets': [
-                    {
-                        'name': "{}-{}".format(net_name[:24], uuid4()),
-                        'address_prefix': ip_profile['subnet_address']
-                    }
-                ]
-            }
-            self.conn_vnet.virtual_networks.create_or_update(self.resource_group, self.vnet_name, vnet_params)
-            # TODO return a tuple (subnet-ID, None)
-        except Exception as e:
-            self.format_vimconn_exception(e)
-
-    def _create_nic(self, subnet_id, nic_name, static_ip=None):
-        self._reload_connection()
-        
-        resource_group_name=self._get_resource_group_name_from_resource_id(subnet_id)
-        location = self._get_location_from_resource_group(resource_group_name)
-            
-        if static_ip:
-            async_nic_creation = self.conn_vnet.network_interfaces.create_or_update(
-                resource_group_name,
-                nic_name,
-                {
-                    'location': location,
-                    'ip_configurations': [{
-                        'name': nic_name + 'ipconfiguration',
-                        'privateIPAddress': static_ip,
-                        'privateIPAllocationMethod': 'Static',
-                        'subnet': {
-                            'id': subnet_id
-                        }
-                    }]
-                }
-            )
-        else:
-            async_nic_creation = self.conn_vnet.network_interfaces.create_or_update(
-                resource_group_name,
-                nic_name,
-                {
-                    'location': location,
-                    'ip_configurations': [{
-                        'name': nic_name + 'ipconfiguration',
-                        'subnet': {
-                            'id': subnet_id
-                        }
-                    }]
-                }
-            )
-
-        return async_nic_creation.result()
-
-    def get_image_list(self, filter_dict={}):
-        """
-        The urn contains for marketplace  'publisher:offer:sku:version'
-
-        :param filter_dict:
-        :return:
-        """
-        image_list = []
-
-        self._reload_connection()
-        if filter_dict.get("name"):
-            params = filter_dict["name"].split(":")
-            if len(params) >= 3:
-                publisher = params[0]
-                offer = params[1]
-                sku = params[2]
-                version = None
-                if len(params) == 4:
-                    version = params[3]
-                images = self.conn_compute.virtual_machine_images.list(self.region, publisher, offer, sku)
-                for image in images:
-                    if version:
-                        image_version = str(image.id).split("/")[-1]
-                        if image_version != version:
-                            continue
-                    image_list.append({
-                        'id': str(image.id),
-                        'name': self._get_resource_name_from_resource_id(image.id)
-                    })
-                return image_list
-
-        images = self.conn_compute.virtual_machine_images.list()
-
-        for image in images:
-            # TODO implement filter_dict
-            if filter_dict:
-                if filter_dict.get("id") and str(image.id) != filter_dict["id"]:
-                    continue
-                if filter_dict.get("name") and \
-                        self._get_resource_name_from_resource_id(image.id) != filter_dict["name"]:
-                    continue
-                # TODO add checksum
-            image_list.append({
-                'id': str(image.id),
-                'name': self._get_resource_name_from_resource_id(image.id),
-            })
-        return image_list
-
-    def get_network_list(self, filter_dict={}):
-        """Obtain tenant networks of VIM
-        Filter_dict can be:
-            name: network name
-            id: network uuid
-            shared: boolean
-            tenant_id: tenant
-            admin_state_up: boolean
-            status: 'ACTIVE'
-        Returns the network list of dictionaries
-        """
-        self.logger.debug('Getting all subnets from VIM')
-        try:
-            self._reload_connection()
-            vnet = self.conn_vnet.virtual_networks.get(self.config["resource_group"], self.vnet_name)
-            subnet_list = []
-            
-            for subnet in vnet.subnets:
-                # TODO implement filter_dict
-                if filter_dict:
-                    if filter_dict.get("id") and str(subnet.id) != filter_dict["id"]:
-                        continue
-                    if filter_dict.get("name") and \
-                            self._get_resource_name_from_resource_id(subnet.id) != filter_dict["name"]:
-                        continue
-
-                subnet_list.append({
-                    'id': str(subnet.id),
-                     'name': self._get_resource_name_from_resource_id(subnet.id),
-                     'status': str(vnet.provisioning_state),  # TODO Does subnet contains status???
-                     'cidr_block': str(subnet.address_prefix)
-                    }
-                )
-            return subnet_list
-        except Exception as e:
-            self.format_vimconn_exception(e)
-
-    def new_vminstance(self, vm_name, description, start, image_id, flavor_id, net_list, cloud_config=None,
-                       disk_list=None, availability_zone_index=None, availability_zone_list=None):
-
-        return self._new_vminstance(vm_name, image_id, flavor_id, net_list)
-        
-    def _new_vminstance(self, vm_name, image_id, flavor_id, net_list, cloud_config=None, disk_list=None,
-                        availability_zone_index=None, availability_zone_list=None):
-        #Create NICs
-        self._check_subnets_for_vm(net_list)
-        vm_nics = []
-        for idx, net in enumerate(net_list):
-            subnet_id=net['subnet_id']
-            nic_name = vm_name + '-nic-'+str(idx)
-            vm_nic = self._create_nic(subnet_id, nic_name)
-            vm_nics.append({ 'id': str(vm_nic.id)})
-
-        try:
-            vm_parameters = {
-                'location': self.region,
-                'os_profile': {
-                    'computer_name': vm_name,  # TODO if vm_name cannot be repeated add uuid4() suffix
-                    'admin_username': 'sergio',  # TODO is it mandatory???
-                    'linuxConfiguration': {
-                        'disablePasswordAuthentication': 'true',
-                        'ssh': {
-                          'publicKeys': [
-                            {
-                              'path': '/home/sergio/.ssh/authorized_keys',
-                              'keyData': self.pub_key
-                            }
-                          ]
-                        }
-                    }                    
-                    
-                },
-                'hardware_profile': {
-                    'vm_size':flavor_id
-                },
-                'storage_profile': {
-                    'image_reference': image_id
-                },
-                'network_profile': {
-                    'network_interfaces': [
-                        vm_nics[0]
-                    ]
-                }
-            }
-            creation_result = self.conn_compute.virtual_machines.create_or_update(
-                self.resource_group, 
-                vm_name, 
-                vm_parameters
-            )
-            
-            run_command_parameters = {
-                'command_id': 'RunShellScript', # For linux, don't change it
-                'script': [
-                'date > /home/sergio/test.txt'
-                ]
-            }
-            poller = self.conn_compute.virtual_machines.run_command(
-                self.resource_group, 
-                vm_name, 
-                run_command_parameters
-            )
-            # TODO return a tuple (vm-ID, None)
-        except Exception as e:
-            self.format_vimconn_exception(e)
-
-    def get_flavor_id_from_data(self, flavor_dict):
-        self.logger.debug("Getting flavor id from data")
-        self._reload_connection()
-        vm_sizes_list = [vm_size.serialize() for vm_size in self.conn_compute.virtual_machine_sizes.list(self.region)]
-
-        cpus = flavor_dict['vcpus']
-        memMB = flavor_dict['ram']
-
-        filteredSizes = [size for size in vm_sizes_list if size['numberOfCores'] > cpus and size['memoryInMB'] > memMB]
-        listedFilteredSizes = sorted(filteredSizes, key=lambda k: k['numberOfCores'])
-
-        return listedFilteredSizes[0]['name']
-
-    def check_vim_connectivity(self):
-        try:
-            self._reload_connection()
-            return True
-        except Exception as e:
-            raise vimconn.vimconnException("Connectivity issue with Azure API: {}".format(e))
-
-    def get_network(self, net_id):
-        resGroup = self._get_resource_group_name_from_resource_id(net_id)
-        resName = self._get_resource_name_from_resource_id(net_id)
-        
-        self._reload_connection()
-        vnet = self.conn_vnet.virtual_networks.get(resGroup, resName)
-
-        return vnet
-
-    def delete_network(self, net_id):
-        resGroup = self._get_resource_group_name_from_resource_id(net_id)
-        resName = self._get_resource_name_from_resource_id(net_id)
-        
-        self._reload_connection()
-        self.conn_vnet.virtual_networks.delete(resGroup, resName)
-
-    def delete_vminstance(self, vm_id):
-        resGroup = self._get_resource_group_name_from_resource_id(net_id)
-        resName = self._get_resource_name_from_resource_id(net_id)
-        
-        self._reload_connection()
-        self.conn_compute.virtual_machines.delete(resGroup, resName)
-
-    def get_vminstance(self, vm_id):
-        resGroup = self._get_resource_group_name_from_resource_id(net_id)
-        resName = self._get_resource_name_from_resource_id(net_id)
-        
-        self._reload_connection()
-        vm=self.conn_compute.virtual_machines.get(resGroup, resName)
-
-        return vm
-
-    def get_flavor(self, flavor_id):
-        self._reload_connection()
-        for vm_size in self.conn_compute.virtual_machine_sizes.list(self.region):
-            if vm_size.name == flavor_id :
-                return vm_size
-
-
-# TODO refresh_nets_status ver estado activo
-# TODO refresh_vms_status  ver estado activo
-# TODO get_vminstance_console  for getting console
-
-if __name__ == "__main__":
-
-    # Making some basic test
-    vim_id='azure'
-    vim_name='azure'
-    needed_test_params = {
-        "client_id": "AZURE_CLIENT_ID",
-        "secret": "AZURE_SECRET",
-        "tenant": "AZURE_TENANT",
-        "resource_group": "AZURE_RESOURCE_GROUP",
-        "subscription_id": "AZURE_SUBSCRIPTION_ID",
-        "vnet_name": "AZURE_VNET_NAME",
-    }
-    test_params = {}
-
-    for param, env_var in needed_test_params.items():
-        value = getenv(env_var)
-        if not value:
-            raise Exception("Provide a valid value for env '{}'".format(env_var))
-        test_params[param] = value
-
-    config = {
-            'region_name': getenv("AZURE_REGION_NAME", 'westeurope'),
-            'resource_group': getenv("AZURE_RESOURCE_GROUP"),
-            'subscription_id': getenv("AZURE_SUBSCRIPTION_ID"),
-            'pub_key': getenv("AZURE_PUB_KEY", None),
-            'vnet_name': getenv("AZURE_VNET_NAME", 'myNetwork'),
-    }
-
-    virtualMachine = {
-        'name': 'sergio',
-        'description': 'new VM',
-        'status': 'running',
-        'image': {
-            'publisher': 'Canonical',
-            'offer': 'UbuntuServer',
-            'sku': '16.04.0-LTS',
-            'version': 'latest'
-        },
-        'hardware_profile': {
-            'vm_size': 'Standard_DS1_v2'
-        },
-        'networks': [
-            'sergio'
-        ]
-    }
-
-    vnet_config = {
-        'subnet_address': '10.1.2.0/24',
-        #'subnet_name': 'subnet-oam'
-    }
-    ###########################
-
-    azure = vimconnector(vim_id, vim_name, tenant_id=test_params["tenant"], tenant_name=None, url=None, url_admin=None,
-                         user=test_params["client_id"], passwd=test_params["secret"], log_level=None, config=config)
-
-    # azure.get_flavor_id_from_data("here")
-    # subnets=azure.get_network_list()
-    # azure.new_vminstance(virtualMachine['name'], virtualMachine['description'], virtualMachine['status'],
-    #                      virtualMachine['image'], virtualMachine['hardware_profile']['vm_size'], subnets)
-
-    azure.get_flavor("Standard_A11")
\ No newline at end of file
diff --git a/osm_ro/vimconn_fos.py b/osm_ro/vimconn_fos.py
deleted file mode 100644 (file)
index fd539cc..0000000
+++ /dev/null
@@ -1,879 +0,0 @@
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2019 ADLINK Technology Inc..
-# This file is part of ETSI OSM
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-#
-
-"""
-Eclipse fog05 connector, implements methods to interact with fog05 using REST Client + REST Proxy
-
-Manages LXD containers on x86_64 by default, currently missing EPA and VF/PF
-Support config dict:
-    - arch : cpu architecture for the VIM
-    - hypervisor: virtualization technology supported by the VIM, can
-                can be one of: LXD, KVM, BARE, XEN, DOCKER, MCU
-                the selected VIM need to have at least a node with support
-                for the selected hypervisor
-
-"""
-__author__="Gabriele Baldoni"
-__date__ ="$13-may-2019 10:35:12$"
-
-import uuid
-import socket
-import struct
-import vimconn
-import random
-import yaml
-from functools import partial
-from fog05rest import FIMAPI
-from fog05rest import fimerrors
-
-
-class vimconnector(vimconn.vimconnector):
-    def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None, log_level=None,
-                 config={}, persistent_info={}):
-        """Constructor of VIM
-        Params:
-            'uuid': id asigned to this VIM
-            'name': name assigned to this VIM, can be used for logging
-            'tenant_id', 'tenant_name': (only one of them is mandatory) VIM tenant to be used
-            'url_admin': (optional), url used for administrative tasks
-            'user', 'passwd': credentials of the VIM user
-            'log_level': provider if it should use a different log_level than the general one
-            'config': dictionary with extra VIM information. This contains a consolidate version of general VIM config
-                    at creation and particular VIM config at teh attachment
-            'persistent_info': dict where the class can store information that will be available among class
-                    destroy/creation cycles. This info is unique per VIM/credential. At first call it will contain an
-                    empty dict. Useful to store login/tokens information for speed up communication
-
-        Returns: Raise an exception is some needed parameter is missing, but it must not do any connectivity
-            check against the VIM
-        """
-
-        vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level,
-                                      config, persistent_info)
-
-        self.logger.debug('vimconn_fos init with config: {}'.format(config))
-        self.arch = config.get('arch', 'x86_64')
-        self.hv = config.get('hypervisor', 'LXD')
-        self.nodes = config.get('nodes', [])
-        self.fdu_node_map = {}
-        self.fos_api = FIMAPI(locator=self.url)
-
-
-    def __get_ip_range(self, first, count):
-        int_first = struct.unpack('!L', socket.inet_aton(first))[0]
-        int_last = int_first + count
-        last = socket.inet_ntoa(struct.pack('!L', int_last))
-        return (first, last)
-
-    def __name_filter(self, desc, filter_name=None):
-        if filter_name is None:
-            return True
-        return desc.get('name') == filter_name
-
-    def __id_filter(self, desc, filter_id=None):
-        if filter_id is None:
-            return True
-        return desc.get('uuid') == filter_id
-
-    def __checksum_filter(self, desc, filter_checksum=None):
-        if filter_checksum is None:
-            return True
-        return desc.get('checksum') == filter_checksum
-
-    def check_vim_connectivity(self):
-        """Checks VIM can be reached and user credentials are ok.
-        Returns None if success or raised vimconnConnectionException, vimconnAuthException, ...
-        """
-        try:
-            self.fos_api.check()
-            return None
-        except fimerrors.FIMAuthExcetpion as fae:
-            raise vimconn.vimconnAuthException("Unable to authenticate to the VIM. Error {}".format(fae))
-        except Exception as e:
-            raise vimconn.vimconnConnectionException("VIM not reachable. Error {}".format(e))
-
-    def new_network(self, net_name, net_type, ip_profile=None, shared=False, vlan=None):
-        """Adds a tenant network to VIM
-        Params:
-            'net_name': name of the network
-            'net_type': one of:
-                'bridge': overlay isolated network
-                'data':   underlay E-LAN network for Passthrough and SRIOV interfaces
-                'ptp':    underlay E-LINE network for Passthrough and SRIOV interfaces.
-            'ip_profile': is a dict containing the IP parameters of the network
-                'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
-                'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
-                'gateway_address': (Optional) ip_schema, that is X.X.X.X
-                'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
-                'dhcp_enabled': True or False
-                'dhcp_start_address': ip_schema, first IP to grant
-                'dhcp_count': number of IPs to grant.
-            'shared': if this network can be seen/use by other tenants/organization
-            'vlan': in case of a data or ptp net_type, the intended vlan tag to be used for the network
-        Returns the network identifier on success or raises and exception on failure
-        """
-        self.logger.debug('new_network: {}'.format(locals()))
-        if net_type in ['data','ptp']:
-            raise vimconn.vimconnNotImplemented('{} type of network not supported'.format(net_type))
-
-        net_uuid = '{}'.format(uuid.uuid4())
-        desc = {
-            'uuid':net_uuid,
-            'name':net_name,
-            'net_type':'ELAN',
-            'is_mgmt':False
-            }
-
-        if ip_profile is not None:
-            ip = {}
-            if ip_profile.get('ip_version') == 'IPv4':
-                ip_info = {}
-                ip_range = self.__get_ip_range(ip_profile.get('dhcp_start_address'), ip_profile.get('dhcp_count'))
-                dhcp_range = '{},{}'.format(ip_range[0],ip_range[1])
-                ip.update({'subnet':ip_profile.get('subnet_address')})
-                ip.update({'dns':ip_profile.get('dns', None)})
-                ip.update({'dhcp_enable':ip_profile.get('dhcp_enabled', False)})
-                ip.update({'dhcp_range': dhcp_range})
-                ip.update({'gateway':ip_profile.get('gateway_address', None)})
-                desc.update({'ip_configuration':ip_info})
-            else:
-                raise vimconn.vimconnNotImplemented('IPV6 network is not implemented at VIM')
-            desc.update({'ip_configuration':ip})
-        self.logger.debug('VIM new_network args: {} - Generated Eclipse fog05 Descriptor {}'.format(locals(), desc))
-        try:
-            self.fos_api.network.add_network(desc)
-        except fimerrors.FIMAResouceExistingException as free:
-            raise vimconn.vimconnConflictException("Network already exists at VIM. Error {}".format(free))
-        except Exception as e:
-            raise vimconn.vimconnException("Unable to create network {}. Error {}".format(net_name, e))
-            # No way from the current rest service to get the actual error, most likely it will be an already existing error
-        return net_uuid
-
-    def get_network_list(self, filter_dict={}):
-        """Obtain tenant networks of VIM
-        Params:
-            'filter_dict' (optional) contains entries to return only networks that matches ALL entries:
-                name: string  => returns only networks with this name
-                id:   string  => returns networks with this VIM id, this imply returns one network at most
-                shared: boolean >= returns only networks that are (or are not) shared
-                tenant_id: sting => returns only networks that belong to this tenant/project
-                ,#(not used yet) admin_state_up: boolean => returns only networks that are (or are not) in admin state active
-                #(not used yet) status: 'ACTIVE','ERROR',... => filter networks that are on this status
-        Returns the network list of dictionaries. each dictionary contains:
-            'id': (mandatory) VIM network id
-            'name': (mandatory) VIM network name
-            'status': (mandatory) can be 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
-            'network_type': (optional) can be 'vxlan', 'vlan' or 'flat'
-            'segmentation_id': (optional) in case network_type is vlan or vxlan this field contains the segmentation id
-            'error_msg': (optional) text that explains the ERROR status
-            other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
-        List can be empty if no network map the filter_dict. Raise an exception only upon VIM connectivity,
-            authorization, or some other unspecific error
-        """
-        self.logger.debug('get_network_list: {}'.format(filter_dict))
-        res = []
-        try:
-            nets = self.fos_api.network.list()
-        except Exception as e:
-            raise vimconn.vimconnConnectionException("Cannot get network list from VIM, connection error. Error {}".format(e))
-
-        filters = [
-            partial(self.__name_filter, filter_name=filter_dict.get('name')),
-            partial(self.__id_filter,filter_id=filter_dict.get('id'))
-        ]
-
-        r1 = []
-
-        for n in nets:
-            match = True
-            for f in filters:
-                match = match and f(n)
-            if match:
-                r1.append(n)
-
-        for n in r1:
-            osm_net = {
-                'id':n.get('uuid'),
-                'name':n.get('name'),
-                'status':'ACTIVE'
-            }
-            res.append(osm_net)
-        return res
-
-    def get_network(self, net_id):
-        """Obtain network details from the 'net_id' VIM network
-        Return a dict that contains:
-            'id': (mandatory) VIM network id, that is, net_id
-            'name': (mandatory) VIM network name
-            'status': (mandatory) can be 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
-            'error_msg': (optional) text that explains the ERROR status
-            other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
-        Raises an exception upon error or when network is not found
-        """
-        self.logger.debug('get_network: {}'.format(net_id))
-        res = self.get_network_list(filter_dict={'id':net_id})
-        if len(res) == 0:
-            raise vimconn.vimconnNotFoundException("Network {} not found at VIM".format(net_id))
-        return res[0]
-
-    def delete_network(self, net_id):
-        """Deletes a tenant network from VIM
-        Returns the network identifier or raises an exception upon error or when network is not found
-        """
-        self.logger.debug('delete_network: {}'.format(net_id))
-        try:
-            self.fos_api.network.remove_network(net_id)
-        except fimerrors.FIMNotFoundException as fnfe:
-            raise vimconn.vimconnNotFoundException("Network {} not found at VIM (already deleted?). Error {}".format(net_id, fnfe))
-        except Exception as e:
-            raise vimconn.vimconnException("Cannot delete network {} from VIM. Error {}".format(net_id, e))
-        return net_id
-
-    def refresh_nets_status(self, net_list):
-        """Get the status of the networks
-        Params:
-            'net_list': a list with the VIM network id to be get the status
-        Returns a dictionary with:
-            'net_id':         #VIM id of this network
-                status:     #Mandatory. Text with one of:
-                    #  DELETED (not found at vim)
-                    #  VIM_ERROR (Cannot connect to VIM, authentication problems, VIM response error, ...)
-                    #  OTHER (Vim reported other status not understood)
-                    #  ERROR (VIM indicates an ERROR status)
-                    #  ACTIVE, INACTIVE, DOWN (admin down),
-                    #  BUILD (on building process)
-                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
-                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
-            'net_id2': ...
-        """
-        self.logger.debug('Refeshing network status with args: {}'.format(locals()))
-        r = {}
-        for n in net_list:
-            try:
-                osm_n = self.get_network(n)
-                r.update({
-                    osm_n.get('id'):{'status':osm_n.get('status')}
-                })
-            except vimconn.vimconnNotFoundException:
-                r.update({
-                    n:{'status':'VIM_ERROR'}
-                })
-        return r
-
-    def get_flavor(self, flavor_id):
-        """Obtain flavor details from the VIM
-        Returns the flavor dict details {'id':<>, 'name':<>, other vim specific }
-        Raises an exception upon error or if not found
-        """
-        self.logger.debug('VIM get_flavor with args: {}'.format(locals()))
-        try:
-            r = self.fos_api.flavor.get(flavor_id)
-        except Exception as e:
-            raise vimconn.vimconnConnectionException("VIM not reachable. Error {}".format(e))
-        if r is None:
-            raise vimconn.vimconnNotFoundException("Flavor not found at VIM")
-        return {'id':r.get('uuid'), 'name':r.get('name'), 'fos':r}
-
-    def get_flavor_id_from_data(self, flavor_dict):
-        """Obtain flavor id that match the flavor description
-        Params:
-            'flavor_dict': dictionary that contains:
-                'disk': main hard disk in GB
-                'ram': meomry in MB
-                'vcpus': number of virtual cpus
-                #TODO: complete parameters for EPA
-        Returns the flavor_id or raises a vimconnNotFoundException
-        """
-        self.logger.debug('VIM get_flavor_id_from_data with args : {}'.format(locals()))
-
-        try:
-            flvs = self.fos_api.flavor.list()
-        except Exception as e:
-            raise vimconn.vimconnConnectionException("VIM not reachable. Error {}".format(e))
-        r = [x.get('uuid') for x in flvs if (x.get('cpu_min_count') == flavor_dict.get('vcpus') and x.get('ram_size_mb') == flavor_dict.get('ram') and x.get('storage_size_gb') == flavor_dict.get('disk'))]
-        if len(r) == 0:
-            raise vimconn.vimconnNotFoundException ( "No flavor found" )
-        return r[0]
-
-    def new_flavor(self, flavor_data):
-        """Adds a tenant flavor to VIM
-            flavor_data contains a dictionary with information, keys:
-                name: flavor name
-                ram: memory (cloud type) in MBytes
-                vpcus: cpus (cloud type)
-                extended: EPA parameters
-                  - numas: #items requested in same NUMA
-                        memory: number of 1G huge pages memory
-                        paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads
-                        interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
-                          - name: interface name
-                            dedicated: yes|no|yes:sriov;  for PT, SRIOV or only one SRIOV for the physical NIC
-                            bandwidth: X Gbps; requested guarantee bandwidth
-                            vpci: requested virtual PCI address
-                disk: disk size
-                is_public:
-                 #TODO to concrete
-        Returns the flavor identifier"""
-        self.logger.debug('VIM new_flavor with args: {}'.format(locals()))
-        flv_id = '{}'.format(uuid.uuid4())
-        desc = {
-            'uuid':flv_id,
-            'name':flavor_data.get('name'),
-            'cpu_arch': self.arch,
-            'cpu_min_count': flavor_data.get('vcpus'),
-            'cpu_min_freq': 0.0,
-            'ram_size_mb':float(flavor_data.get('ram')),
-            'storage_size_gb':float(flavor_data.get('disk'))
-        }
-        try:
-            self.fos_api.flavor.add(desc)
-        except fimerrors.FIMAResouceExistingException as free:
-            raise vimconn.vimconnConflictException("Flavor {} already exist at VIM. Error {}".format(flv_id, free))
-        except Exception as e:
-            raise vimconn.vimconnConnectionException("VIM not reachable. Error {}".format(e))
-        return flv_id
-
-
-    def delete_flavor(self, flavor_id):
-        """Deletes a tenant flavor from VIM identify by its id
-        Returns the used id or raise an exception"""
-        try:
-            self.fos_api.flavor.remove(flavor_id)
-        except fimerrors.FIMNotFoundException as fnfe:
-            raise vimconn.vimconnNotFoundException("Flavor {} not found at VIM (already deleted?). Error {}".format(flavor_id, fnfe))
-        except Exception as e:
-            raise vimconn.vimconnConnectionException("VIM not reachable. Error {}".format(e))
-        return flavor_id
-
-    def new_image(self, image_dict):
-        """ Adds a tenant image to VIM. imge_dict is a dictionary with:
-            name: name
-            disk_format: qcow2, vhd, vmdk, raw (by default), ...
-            location: path or URI
-            public: "yes" or "no"
-            metadata: metadata of the image
-        Returns the image id or raises an exception if failed
-        """
-        self.logger.debug('VIM new_image with args: {}'.format(locals()))
-        img_id = '{}'.format(uuid.uuid4())
-        desc = {
-            'name':image_dict.get('name'),
-            'uuid':img_id,
-            'uri':image_dict.get('location')
-        }
-        try:
-            self.fos_api.image.add(desc)
-        except fimerrors.FIMAResouceExistingException as free:
-            raise vimconn.vimconnConflictException("Image {} already exist at VIM. Error {}".format(img_id, free))
-        except Exception as e:
-            raise vimconn.vimconnConnectionException("VIM not reachable. Error {}".format(e))
-        return img_id
-
-    def get_image_id_from_path(self, path):
-
-        """Get the image id from image path in the VIM database.
-           Returns the image_id or raises a vimconnNotFoundException
-        """
-        self.logger.debug('VIM get_image_id_from_path with args: {}'.format(locals()))
-        try:
-            imgs = self.fos_api.image.list()
-        except Exception as e:
-            raise vimconn.vimconnConnectionException("VIM not reachable. Error {}".format(e))
-        res = [x.get('uuid') for x in imgs if x.get('uri')==path]
-        if len(res) == 0:
-            raise vimconn.vimconnNotFoundException("Image with this path was not found")
-        return res[0]
-
-    def get_image_list(self, filter_dict={}):
-        """Obtain tenant images from VIM
-        Filter_dict can be:
-            name: image name
-            id: image uuid
-            checksum: image checksum
-            location: image path
-        Returns the image list of dictionaries:
-            [{<the fields at Filter_dict plus some VIM specific>}, ...]
-            List can be empty
-        """
-        self.logger.debug('VIM get_image_list args: {}'.format(locals()))
-        r = []
-        try:
-            fimgs = self.fos_api.image.list()
-        except Exception as e:
-            raise vimconn.vimconnConnectionException("VIM not reachable. Error {}".format(e))
-
-        filters = [
-            partial(self.__name_filter, filter_name=filter_dict.get('name')),
-            partial(self.__id_filter,filter_id=filter_dict.get('id')),
-            partial(self.__checksum_filter,filter_checksum=filter_dict.get('checksum'))
-        ]
-
-        r1 = []
-
-        for i in fimgs:
-            match = True
-            for f in filters:
-                match = match and f(i)
-            if match:
-                r1.append(i)
-
-        for i in r1:
-            img_info = {
-                'name':i.get('name'),
-                'id':i.get('uuid'),
-                'checksum':i.get('checksum'),
-                'location':i.get('uri'),
-                'fos':i
-            }
-            r.append(img_info)
-        return r
-        #raise vimconnNotImplemented( "Should have implemented this" )
-
-    def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None, disk_list=None,
-        availability_zone_index=None, availability_zone_list=None):
-        """Adds a VM instance to VIM
-        Params:
-            'start': (boolean) indicates if VM must start or created in pause mode.
-            'image_id','flavor_id': image and flavor VIM id to use for the VM
-            'net_list': list of interfaces, each one is a dictionary with:
-                'name': (optional) name for the interface.
-                'net_id': VIM network id where this interface must be connect to. Mandatory for type==virtual
-                'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities
-                'model': (optional and only have sense for type==virtual) interface model: virtio, e1000, ...
-                'mac_address': (optional) mac address to assign to this interface
-                'ip_address': (optional) IP address to assign to this interface
-                #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type if VF and net_id is not provided,
-                    the VLAN tag to be used. In case net_id is provided, the internal network vlan is used for tagging VF
-                'type': (mandatory) can be one of:
-                    'virtual', in this case always connected to a network of type 'net_type=bridge'
-                     'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp network ot it
-                           can created unconnected
-                     'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
-                     'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
-                            are allocated on the same physical NIC
-                'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
-                'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing
-                                or True, it must apply the default VIM behaviour
-                After execution the method will add the key:
-                'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this
-                        interface. 'net_list' is modified
-            'cloud_config': (optional) dictionary with:
-                'key-pairs': (optional) list of strings with the public key to be inserted to the default user
-                'users': (optional) list of users to be inserted, each item is a dict with:
-                    'name': (mandatory) user name,
-                    'key-pairs': (optional) list of strings with the public key to be inserted to the user
-                'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
-                    or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
-                'config-files': (optional). List of files to be transferred. Each item is a dict with:
-                    'dest': (mandatory) string with the destination absolute path
-                    'encoding': (optional, by default text). Can be one of:
-                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
-                    'content' (mandatory): string with the content of the file
-                    'permissions': (optional) string with file permissions, typically octal notation '0644'
-                    'owner': (optional) file owner, string with the format 'owner:group'
-                'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
-            'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
-                'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
-                'size': (mandatory) string with the size of the disk in GB
-            availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
-            availability_zone_list: list of availability zones given by user in the VNFD descriptor.  Ignore if
-                availability_zone_index is None
-        Returns a tuple with the instance identifier and created_items or raises an exception on error
-            created_items can be None or a dictionary where this method can include key-values that will be passed to
-            the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
-            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
-            as not present.
-        """
-        self.logger.debug('new_vminstance with rgs: {}'.format(locals()))
-        fdu_uuid = '{}'.format(uuid.uuid4())
-
-        flv = self.fos_api.flavor.get(flavor_id)
-        img = self.fos_api.image.get(image_id)
-
-        if flv is None:
-            raise vimconn.vimconnNotFoundException("Flavor {} not found at VIM".format(flavor_id))
-        if img is None:
-            raise vimconn.vimconnNotFoundException("Image {} not found at VIM".format(image_id))
-
-        created_items = {
-            'fdu_id':'',
-            'node_id':'',
-            'connection_points':[]
-            }
-
-        fdu_desc = {
-            'name':name,
-            'uuid':fdu_uuid,
-            'computation_requirements':flv,
-            'image':img,
-            'hypervisor':self.hv,
-            'migration_kind':'LIVE',
-            'interfaces':[],
-            'io_ports':[],
-            'connection_points':[],
-            'depends_on':[]
-        }
-
-        nets = []
-        cps = []
-        intf_id = 0
-        for n in net_list:
-            cp_id = '{}'.format(uuid.uuid4())
-            n.update({'vim_id':cp_id})
-            pair_id = n.get('net_id')
-
-            cp_d = {
-                'uuid':cp_id,
-                'pair_id':pair_id
-            }
-            intf_d = {
-                'name':n.get('name','eth{}'.format(intf_id)),
-                'is_mgmt':False,
-                'if_type':'INTERNAL',
-                'virtual_interface':{
-                    'intf_type':n.get('model','VIRTIO'),
-                    'vpci':n.get('vpci','0:0:0'),
-                    'bandwidth':int(n.get('bw', 100))
-                }
-            }
-            if n.get('mac_address', None) is not None:
-                intf_d['mac_address'] = n['mac_address']
-
-            created_items['connection_points'].append(cp_id)
-            fdu_desc['connection_points'].append(cp_d)
-            fdu_desc['interfaces'].append(intf_d)
-
-            intf_id = intf_id + 1
-
-        if cloud_config is not None:
-            configuration = {
-                    'conf_type':'CLOUD_INIT'
-                }
-            if cloud_config.get('user-data') is not None:
-                configuration.update({'script':cloud_config.get('user-data')})
-            if cloud_config.get('key-pairs') is not None:
-                configuration.update({'ssh_keys':cloud_config.get('key-pairs')})
-
-            if 'script' in configuration:
-                fdu_desc.update({'configuration':configuration})
-
-        ### NODE Selection ###
-        # Infrastructure info
-        #   nodes dict with
-        #        uuid -> node uuid
-        #        computational capabilities -> cpu, ram, and disk available
-        #        hypervisors -> list of available hypervisors (eg. KVM, LXD, BARE)
-        #
-        #
-
-        # UPDATING AVAILABLE INFRASTRUCTURE
-
-        if len(self.nodes) == 0:
-            nodes_id = self.fos_api.node.list()
-        else:
-            nodes_id = self.nodes
-        nodes = []
-        for n in nodes_id:
-            n_info = self.fos_api.node.info(n)
-            if n_info is None:
-                continue
-            n_plugs = []
-            for p in self.fos_api.node.plugins(n):
-                n_plugs.append(self.fos_api.plugin.info(n,p))
-
-            n_cpu_number =  len(n_info.get('cpu'))
-            n_cpu_arch = n_info.get('cpu')[0].get('arch')
-            n_cpu_freq = n_info.get('cpu')[0].get('frequency')
-            n_ram = n_info.get('ram').get('size')
-            n_disk_size = sorted(list(filter(lambda x: 'sda' in x['local_address'], n_info.get('disks'))), key= lambda k: k['dimension'])[-1].get('dimension')
-
-            hvs = []
-            for p in n_plugs:
-                if p.get('type') == 'runtime':
-                    hvs.append(p.get('name'))
-
-            ni = {
-                'uuid':n,
-                'computational_capabilities':{
-                    'cpu_count':n_cpu_number,
-                    'cpu_arch':n_cpu_arch,
-                    'cpu_freq':n_cpu_freq,
-                    'ram_size':n_ram,
-                    'disk_size':n_disk_size
-                },
-                'hypervisors':hvs
-            }
-            nodes.append(ni)
-
-        # NODE SELECTION
-        compatible_nodes = []
-        for n in nodes:
-            if fdu_desc.get('hypervisor') in n.get('hypervisors'):
-                n_comp = n.get('computational_capabilities')
-                f_comp = fdu_desc.get('computation_requirements')
-                if f_comp.get('cpu_arch') == n_comp.get('cpu_arch'):
-                    if f_comp.get('cpu_min_count') <= n_comp.get('cpu_count') and f_comp.get('ram_size_mb') <= n_comp.get('ram_size'):
-                        if f_comp.get('disk_size_gb') <= n_comp.get('disk_size'):
-                            compatible_nodes.append(n)
-
-        if len(compatible_nodes) == 0:
-            raise vimconn.vimconnConflictException("No available nodes at VIM")
-        selected_node = random.choice(compatible_nodes)
-
-        created_items.update({'fdu_id':fdu_uuid, 'node_id': selected_node.get('uuid')})
-
-        self.logger.debug('FOS Node {} FDU Descriptor: {}'.format(selected_node.get('uuid'), fdu_desc))
-
-        try:
-            self.fos_api.fdu.onboard(fdu_desc)
-            instanceid = self.fos_api.fdu.instantiate(fdu_uuid, selected_node.get('uuid'))
-            created_items.update({'instance_id':instanceid})
-
-            self.fdu_node_map.update({instanceid: selected_node.get('uuid')})
-            self.logger.debug('new_vminstance return: {}'.format((fdu_uuid, created_items)))
-            return (instanceid, created_items)
-        except fimerrors.FIMAResouceExistingException as free:
-            raise vimconn.vimconnConflictException("VM already exists at VIM. Error {}".format(free))
-        except Exception as e:
-            raise vimconn.vimconnException("Error while instantiating VM {}. Error {}".format(name, e))
-
-
-    def get_vminstance(self,vm_id):
-        """Returns the VM instance information from VIM"""
-        self.logger.debug('VIM get_vminstance with args: {}'.format(locals()))
-
-        try:
-            intsinfo = self.fos_api.fdu.instance_info(vm_id)
-        except Exception as e:
-            raise vimconn.vimconnConnectionException("VIM not reachable. Error {}".format(e))
-        if intsinfo is None:
-            raise vimconn.vimconnNotFoundException('VM with id {} not found!'.format(vm_id))
-        return intsinfo
-
-
-    def delete_vminstance(self, vm_id, created_items=None):
-        """
-        Removes a VM instance from VIM and each associate elements
-        :param vm_id: VIM identifier of the VM, provided by method new_vminstance
-        :param created_items: dictionary with extra items to be deleted. provided by method new_vminstance and/or method
-            action_vminstance
-        :return: None or the same vm_id. Raises an exception on fail
-        """
-        self.logger.debug('FOS delete_vminstance with args: {}'.format(locals()))
-        fduid =  created_items.get('fdu_id')
-        try:
-            self.fos_api.fdu.terminate(vm_id)
-            self.fos_api.fdu.offload(fduid)
-        except Exception as e:
-            raise vimconn.vimconnException("Error on deletting VM with id {}. Error {}".format(vm_id,e))
-        return vm_id
-
-        #raise vimconnNotImplemented( "Should have implemented this" )
-
-    def refresh_vms_status(self, vm_list):
-        """Get the status of the virtual machines and their interfaces/ports
-           Params: the list of VM identifiers
-           Returns a dictionary with:
-                vm_id:          #VIM id of this Virtual Machine
-                    status:     #Mandatory. Text with one of:
-                                #  DELETED (not found at vim)
-                                #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
-                                #  OTHER (Vim reported other status not understood)
-                                #  ERROR (VIM indicates an ERROR status)
-                                #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
-                                #  BUILD (on building process), ERROR
-                                #  ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
-                                #
-                    error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
-                    vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
-                    interfaces: list with interface info. Each item a dictionary with:
-                        vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
-                        mac_address:      #Text format XX:XX:XX:XX:XX:XX
-                        vim_net_id:       #network id where this interface is connected, if provided at creation
-                        vim_interface_id: #interface/port VIM id
-                        ip_address:       #null, or text with IPv4, IPv6 address
-                        compute_node:     #identification of compute node where PF,VF interface is allocated
-                        pci:              #PCI address of the NIC that hosts the PF,VF
-                        vlan:             #physical VLAN used for VF
-        """
-        self.logger.debug('FOS refresh_vms_status with args: {}'.format(locals()))
-        fos2osm_status = {
-            'DEFINE':'OTHER',
-            'CONFIGURE':'INACTIVE',
-            'RUN':'ACTIVE',
-            'PAUSE':'PAUSED',
-            'ERROR':'ERROR'
-        }
-
-        r = {}
-
-        for vm in vm_list:
-            self.logger.debug('FOS refresh_vms_status for {}'.format(vm))
-
-            info = {}
-            nid = self.fdu_node_map.get(vm)
-            if nid is None:
-                r.update({vm:{
-                    'status':'VIM_ERROR',
-                    'error_msg':'Not compute node associated for VM'
-                }})
-                continue
-
-            try:
-                vm_info = self.fos_api.fdu.instance_info(vm)
-            except:
-                r.update({vm:{
-                    'status':'VIM_ERROR',
-                    'error_msg':'unable to connect to VIM'
-                }})
-                continue
-
-            if vm_info is None:
-                r.update({vm:{'status':'DELETED'}})
-                continue
-
-
-            desc = self.fos_api.fdu.info(vm_info['fdu_uuid'])
-            osm_status = fos2osm_status.get(vm_info.get('status'))
-
-            self.logger.debug('FOS status info {}'.format(vm_info))
-            self.logger.debug('FOS status is {} <-> OSM Status {}'.format(vm_info.get('status'), osm_status))
-            info.update({'status':osm_status})
-            if vm_info.get('status') == 'ERROR':
-                info.update({'error_msg':vm_info.get('error_code')})
-            info.update({'vim_info':yaml.safe_dump(vm_info)})
-            faces = []
-            i = 0
-            for intf_name in vm_info.get('hypervisor_info').get('network',[]):
-                intf_info = vm_info.get('hypervisor_info').get('network').get(intf_name)
-                face = {}
-                face['compute_node'] = nid
-                face['vim_info'] = yaml.safe_dump(intf_info)
-                face['mac_address'] = intf_info.get('hwaddr')
-                addrs = []
-                for a in intf_info.get('addresses'):
-                    addrs.append(a.get('address'))
-                if len(addrs) >= 0:
-                    face['ip_address'] = ','.join(addrs)
-                else:
-                    face['ip_address'] = ''
-                face['pci'] = '0:0:0.0'
-                # getting net id by CP
-                try:
-                    cp_info = vm_info.get('connection_points')[i]
-                except IndexError:
-                    cp_info = None
-                if cp_info is not None:
-                    cp_id = cp_info['cp_uuid']
-                    cps_d = desc['connection_points']
-                    matches = [x for x in cps_d if x['uuid'] == cp_id]
-                    if len(matches) > 0:
-                        cpd = matches[0]
-                        face['vim_net_id'] = cpd.get('pair_id','')
-                    else:
-                        face['vim_net_id'] = ''
-                    face['vim_interface_id'] = cp_id
-                    # cp_info.get('uuid')
-                else:
-                    face['vim_net_id'] = ''
-                    face['vim_interface_id'] = intf_name
-                faces.append(face)
-                i += 1
-
-
-
-            info.update({'interfaces':faces})
-            r.update({vm:info})
-            self.logger.debug('FOS refresh_vms_status res for {} is {}'.format(vm, info))
-        self.logger.debug('FOS refresh_vms_status res is {}'.format(r))
-        return r
-
-
-        #raise vimconnNotImplemented( "Should have implemented this" )
-
-    def action_vminstance(self, vm_id, action_dict, created_items={}):
-        """
-        Send and action over a VM instance. Returns created_items if the action was successfully sent to the VIM.
-        created_items is a dictionary with items that
-        :param vm_id: VIM identifier of the VM, provided by method new_vminstance
-        :param action_dict: dictionary with the action to perform
-        :param created_items: provided by method new_vminstance is a dictionary with key-values that will be passed to
-            the method delete_vminstance. Can be used to store created ports, volumes, etc. Format is vimconnector
-            dependent, but do not use nested dictionaries and a value of None should be the same as not present. This
-            method can modify this value
-        :return: None, or a console dict
-        """
-        self.logger.debug('VIM action_vminstance with args: {}'.format(locals()))
-        nid = self.fdu_node_map.get(vm_id)
-        if nid is None:
-            raise vimconn.vimconnNotFoundException('No node for this VM')
-        try:
-            fdu_info = self.fos_api.fdu.instance_info(vm_id)
-            if "start" in action_dict:
-                if fdu_info.get('status') == 'CONFIGURE':
-                    self.fos_api.fdu.start(vm_id)
-                elif fdu_info.get('status') == 'PAUSE':
-                    self.fos_api.fdu.resume(vm_id)
-                else:
-                    raise vimconn.vimconnConflictException("Cannot start from this state")
-            elif "pause" in action_dict:
-                if fdu_info.get('status') == 'RUN':
-                    self.fos_api.fdu.pause(vm_id)
-                else:
-                    raise vimconn.vimconnConflictException("Cannot pause from this state")
-            elif "resume" in action_dict:
-                if fdu_info.get('status') == 'PAUSE':
-                    self.fos_api.fdu.resume(vm_id)
-                else:
-                    raise vimconn.vimconnConflictException("Cannot resume from this state")
-            elif "shutoff" in action_dict or "shutdown" or "forceOff" in action_dict:
-                if fdu_info.get('status') == 'RUN':
-                    self.fos_api.fdu.stop(vm_id)
-                else:
-                    raise vimconn.vimconnConflictException("Cannot shutoff from this state")
-            elif "terminate" in action_dict:
-                if fdu_info.get('status') == 'RUN':
-                    self.fos_api.fdu.stop(vm_id)
-                    self.fos_api.fdu.clean(vm_id)
-                    self.fos_api.fdu.undefine(vm_id)
-                    # self.fos_api.fdu.offload(vm_id)
-                elif fdu_info.get('status') == 'CONFIGURE':
-                    self.fos_api.fdu.clean(vm_id)
-                    self.fos_api.fdu.undefine(vm_id)
-                    # self.fos_api.fdu.offload(vm_id)
-                elif fdu_info.get('status') == 'PAUSE':
-                    self.fos_api.fdu.resume(vm_id)
-                    self.fos_api.fdu.stop(vm_id)
-                    self.fos_api.fdu.clean(vm_id)
-                    self.fos_api.fdu.undefine(vm_id)
-                    # self.fos_api.fdu.offload(vm_id)
-                else:
-                    raise vimconn.vimconnConflictException("Cannot terminate from this state")
-            elif "rebuild" in action_dict:
-                raise vimconnNotImplemented("Rebuild not implememnted")
-            elif "reboot" in action_dict:
-                if fdu_info.get('status') == 'RUN':
-                    self.fos_api.fdu.stop(vm_id)
-                    self.fos_api.fdu.start(vm_id)
-                else:
-                    raise vimconn.vimconnConflictException("Cannot reboot from this state")
-        except Exception as e:
-            raise vimconn.vimconnConnectionException("VIM not reachable. Error {}".format(e))
diff --git a/osm_ro/vimconn_opennebula.py b/osm_ro/vimconn_opennebula.py
deleted file mode 100644 (file)
index cf1a8fb..0000000
+++ /dev/null
@@ -1,684 +0,0 @@
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2017  Telefonica Digital Spain S.L.U.
-# This file is part of ETSI OSM
-#  All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: patent-office@telefonica.com
-##
-
-"""
-vimconnector implements all the methods to interact with OpenNebula using the XML-RPC API.
-"""
-__author__ = "Jose Maria Carmona Perez,Juan Antonio Hernando Labajo, Emilio Abraham Garrido Garcia,Alberto Florez " \
-             "Pages, Andres Pozo Munoz, Santiago Perez Marin, Onlife Networks Telefonica I+D Product Innovation "
-__date__ = "$13-dec-2017 11:09:29$"
-import vimconn
-import requests
-import logging
-import oca
-import untangle
-import math
-import random
-import pyone
-
-class vimconnector(vimconn.vimconnector):
-    def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None,
-                 log_level="DEBUG", config={}, persistent_info={}):
-
-        """Constructor of VIM
-        Params:
-            'uuid': id asigned to this VIM
-            'name': name assigned to this VIM, can be used for logging
-            'tenant_id', 'tenant_name': (only one of them is mandatory) VIM tenant to be used
-            'url_admin': (optional), url used for administrative tasks
-            'user', 'passwd': credentials of the VIM user
-            'log_level': provider if it should use a different log_level than the general one
-            'config': dictionary with extra VIM information. This contains a consolidate version of general VIM config
-                    at creation and particular VIM config at teh attachment
-            'persistent_info': dict where the class can store information that will be available among class
-                    destroy/creation cycles. This info is unique per VIM/credential. At first call it will contain an
-                    empty dict. Useful to store login/tokens information for speed up communication
-
-        Returns: Raise an exception is some needed parameter is missing, but it must not do any connectivity
-            check against the VIM
-        """
-
-        vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level,
-                                      config)
-
-    def _new_one_connection(self):
-        return pyone.OneServer(self.url, session=self.user + ':' + self.passwd)
-
    def new_tenant(self, tenant_name, tenant_description):
        # '''Adds a new tenant to VIM with this name and description, returns the tenant identifier'''
        """Create an OpenNebula group for tenant_name and return its id as a string.

        NOTE(review): tenant_description is accepted but never used, and the
        method implicitly returns None when the connected user is not found in
        the user pool -- TODO confirm callers tolerate a None tenant id.
        Raises vimconnException (wrapping the original error) on any failure.
        """
        try:
            client = oca.Client(self.user + ':' + self.passwd, self.url)
            group_list = oca.GroupPool(client)
            user_list = oca.UserPool(client)
            # populate both pools from the XML-RPC endpoint
            group_list.info()
            user_list.info()
            create_primarygroup = 1
            # create group-tenant
            for group in group_list:
                if str(group.name) == str(tenant_name):
                    # a group with this name already exists: reuse it
                    create_primarygroup = 0
                    break
            if create_primarygroup == 1:
                oca.Group.allocate(client, tenant_name)
            group_list.info()  # refresh so a just-created group becomes visible
            # set to primary_group the tenant_group and oneadmin to secondary_group
            for group in group_list:
                if str(group.name) == str(tenant_name):
                    for user in user_list:
                        if str(user.name) == str(self.user):
                            if user.name == "oneadmin":
                                # oneadmin keeps its primary group; group id 0 is returned
                                return str(0)
                            else:
                                # make the new group this user's primary group
                                self._add_secondarygroup(user.id, group.id)
                                user.chgrp(group.id)
                                return str(group.id)
        except Exception as e:
            self.logger.error("Create new tenant error: " + str(e))
            raise vimconn.vimconnException(e)
-
-    def delete_tenant(self, tenant_id):
-        """Delete a tenant from VIM. Returns the old tenant identifier"""
-        try:
-            client = oca.Client(self.user + ':' + self.passwd, self.url)
-            group_list = oca.GroupPool(client)
-            user_list = oca.UserPool(client)
-            group_list.info()
-            user_list.info()
-            for group in group_list:
-                if str(group.id) == str(tenant_id):
-                    for user in user_list:
-                        if str(user.name) == str(self.user):
-                            self._delete_secondarygroup(user.id, group.id)
-                            group.delete(client)
-                    return None
-            raise vimconn.vimconnNotFoundException("Group {} not found".format(tenant_id))
-        except Exception as e:
-            self.logger.error("Delete tenant " + str(tenant_id) + " error: " + str(e))
-            raise vimconn.vimconnException(e)
-
    def _add_secondarygroup(self, id_user, id_group):
        # change secondary_group to primary_group
        # Raw XML-RPC call to "one.user.addgroup": the oca library does not
        # expose this operation, so the payload is built by hand and POSTed
        # to the VIM endpoint.
        # NOTE(review): the HTTP response is not checked -- failures are silent.
        params = '<?xml version="1.0"?> \
                   <methodCall>\
                   <methodName>one.user.addgroup</methodName>\
                   <params>\
                   <param>\
                   <value><string>{}:{}</string></value>\
                   </param>\
                   <param>\
                   <value><int>{}</int></value>\
                   </param>\
                   <param>\
                   <value><int>{}</int></value>\
                   </param>\
                   </params>\
                   </methodCall>'.format(self.user, self.passwd, (str(id_user)), (str(id_group)))
        requests.post(self.url, params)
-
    def _delete_secondarygroup(self, id_user, id_group):
        # Raw XML-RPC call to "one.user.delgroup": removes id_group from the
        # secondary groups of id_user. Same hand-built payload approach as
        # _add_secondarygroup; the HTTP response is likewise not checked.
        params = '<?xml version="1.0"?> \
                   <methodCall>\
                   <methodName>one.user.delgroup</methodName>\
                   <params>\
                   <param>\
                   <value><string>{}:{}</string></value>\
                   </param>\
                   <param>\
                   <value><int>{}</int></value>\
                   </param>\
                   <param>\
                   <value><int>{}</int></value>\
                   </param>\
                   </params>\
                   </methodCall>'.format(self.user, self.passwd, (str(id_user)), (str(id_group)))
        requests.post(self.url, params)
-
-    def new_network(self, net_name, net_type, ip_profile=None, shared=False, vlan=None):  # , **vim_specific):
-        """Adds a tenant network to VIM
-        Params:
-            'net_name': name of the network
-            'net_type': one of:
-                'bridge': overlay isolated network
-                'data':   underlay E-LAN network for Passthrough and SRIOV interfaces
-                'ptp':    underlay E-LINE network for Passthrough and SRIOV interfaces.
-            'ip_profile': is a dict containing the IP parameters of the network
-                'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
-                'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
-                'gateway_address': (Optional) ip_schema, that is X.X.X.X
-                'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
-                'dhcp_enabled': True or False
-                'dhcp_start_address': ip_schema, first IP to grant
-                'dhcp_count': number of IPs to grant.
-            'shared': if this network can be seen/use by other tenants/organization
-            'vlan': in case of a data or ptp net_type, the intended vlan tag to be used for the network
-        Returns a tuple with the network identifier and created_items, or raises an exception on error
-            created_items can be None or a dictionary where this method can include key-values that will be passed to
-            the method delete_network. Can be used to store created segments, created l2gw connections, etc.
-            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
-            as not present.
-        """
-
-        # oca library method cannot be used in this case (problem with cluster parameters)
-        try:
-            created_items = {}
-            one = self._new_one_connection()
-            size = "254"
-            if ip_profile is None:
-                subnet_rand = random.randint(0, 255)
-                ip_start = "192.168.{}.1".format(subnet_rand)
-            else:
-                index = ip_profile["subnet_address"].find("/")
-                ip_start = ip_profile["subnet_address"][:index]
-                if "dhcp_count" in ip_profile.keys() and ip_profile["dhcp_count"] is not None:
-                    size = str(ip_profile["dhcp_count"])
-                elif not ("dhcp_count" in ip_profile.keys()) and ip_profile["ip_version"] == "IPv4":
-                    prefix = ip_profile["subnet_address"][index + 1:]
-                    size = int(math.pow(2, 32 - prefix))
-                if "dhcp_start_address" in ip_profile.keys() and ip_profile["dhcp_start_address"] is not None:
-                    ip_start = str(ip_profile["dhcp_start_address"])
-                if ip_profile["ip_version"] == "IPv6":
-                    ip_prefix_type = "GLOBAL_PREFIX"
-
-            if vlan is not None:
-                vlan_id = vlan
-            else:
-                vlan_id = str(random.randint(100, 4095))
-            #if "internal" in net_name:
-            # OpenNebula not support two networks with same name
-            random_net_name = str(random.randint(1, 1000000))
-            net_name = net_name + random_net_name
-            net_id = one.vn.allocate({
-                        'NAME': net_name,
-                        'VN_MAD': '802.1Q',
-                        'PHYDEV': self.config["network"]["phydev"],
-                        'VLAN_ID': vlan_id
-                    }, self.config["cluster"]["id"])
-            arpool = {'AR_POOL': {
-                        'AR': {
-                            'TYPE': 'IP4',
-                            'IP': ip_start,
-                            'SIZE': size
-                        }
-                    }
-            }
-            one.vn.add_ar(net_id, arpool)
-            return net_id, created_items
-        except Exception as e:
-            self.logger.error("Create new network error: " + str(e))
-            raise vimconn.vimconnException(e)
-
-    def get_network_list(self, filter_dict={}):
-        """Obtain tenant networks of VIM
-        Params:
-            'filter_dict' (optional) contains entries to return only networks that matches ALL entries:
-                name: string  => returns only networks with this name
-                id:   string  => returns networks with this VIM id, this imply returns one network at most
-                shared: boolean >= returns only networks that are (or are not) shared
-                tenant_id: sting => returns only networks that belong to this tenant/project
-                ,#(not used yet) admin_state_up: boolean => returns only networks that are (or are not) in admin state active
-                #(not used yet) status: 'ACTIVE','ERROR',... => filter networks that are on this status
-        Returns the network list of dictionaries. each dictionary contains:
-            'id': (mandatory) VIM network id
-            'name': (mandatory) VIM network name
-            'status': (mandatory) can be 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
-            'network_type': (optional) can be 'vxlan', 'vlan' or 'flat'
-            'segmentation_id': (optional) in case network_type is vlan or vxlan this field contains the segmentation id
-            'error_msg': (optional) text that explains the ERROR status
-            other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
-        List can be empty if no network map the filter_dict. Raise an exception only upon VIM connectivity,
-            authorization, or some other unspecific error
-        """
-
-        try:
-            one = self._new_one_connection()
-            net_pool = one.vnpool.info(-2, -1, -1).VNET
-            response = []
-            if "name" in filter_dict.keys():
-                network_name_filter = filter_dict["name"]
-            else:
-                network_name_filter = None
-            if "id" in filter_dict.keys():
-                network_id_filter = filter_dict["id"]
-            else:
-                network_id_filter = None
-            for network in net_pool:
-                if network.NAME == network_name_filter or str(network.ID) == str(network_id_filter):
-                    net_dict = {"name": network.NAME, "id": str(network.ID), "status": "ACTIVE"}
-                    response.append(net_dict)
-            return response
-        except Exception as e:
-            self.logger.error("Get network list error: " + str(e))
-            raise vimconn.vimconnException(e)
-
-    def get_network(self, net_id):
-        """Obtain network details from the 'net_id' VIM network
-        Return a dict that contains:
-            'id': (mandatory) VIM network id, that is, net_id
-            'name': (mandatory) VIM network name
-            'status': (mandatory) can be 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
-            'error_msg': (optional) text that explains the ERROR status
-            other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
-        Raises an exception upon error or when network is not found
-        """
-        try:
-            one = self._new_one_connection()
-            net_pool = one.vnpool.info(-2, -1, -1).VNET
-            net = {}
-            for network in net_pool:
-                if str(network.ID) == str(net_id):
-                    net['id'] = network.ID
-                    net['name'] = network.NAME
-                    net['status'] = "ACTIVE"
-                    break
-            if net:
-                return net
-            else:
-                raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
-        except Exception as e:
-            self.logger.error("Get network " + str(net_id) + " error): " + str(e))
-            raise vimconn.vimconnException(e)
-
-    def delete_network(self, net_id, created_items=None):
-        """
-        Removes a tenant network from VIM and its associated elements
-        :param net_id: VIM identifier of the network, provided by method new_network
-        :param created_items: dictionary with extra items to be deleted. provided by method new_network
-        Returns the network identifier or raises an exception upon error or when network is not found
-        """
-        try:
-
-            one = self._new_one_connection()
-            one.vn.delete(int(net_id))
-            return net_id
-        except Exception as e:
-            self.logger.error("Delete network " + str(net_id) + "error: network not found" + str(e))
-            raise vimconn.vimconnException(e)
-
-    def refresh_nets_status(self, net_list):
-        """Get the status of the networks
-        Params:
-            'net_list': a list with the VIM network id to be get the status
-        Returns a dictionary with:
-            'net_id':         #VIM id of this network
-                status:     #Mandatory. Text with one of:
-                    #  DELETED (not found at vim)
-                    #  VIM_ERROR (Cannot connect to VIM, authentication problems, VIM response error, ...)
-                    #  OTHER (Vim reported other status not understood)
-                    #  ERROR (VIM indicates an ERROR status)
-                    #  ACTIVE, INACTIVE, DOWN (admin down),
-                    #  BUILD (on building process)
-                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
-                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
-            'net_id2': ...
-        """
-        net_dict = {}
-        try:
-            for net_id in net_list:
-                net = {}
-                try:
-                    net_vim = self.get_network(net_id)
-                    net["status"] = net_vim["status"]
-                    net["vim_info"] = None
-                except vimconn.vimconnNotFoundException as e:
-                    self.logger.error("Exception getting net status: {}".format(str(e)))
-                    net['status'] = "DELETED"
-                    net['error_msg'] = str(e)
-                except vimconn.vimconnException as e:
-                    self.logger.error(e)
-                    net["status"] = "VIM_ERROR"
-                    net["error_msg"] = str(e)
-                net_dict[net_id] = net
-            return net_dict
-        except vimconn.vimconnException as e:
-            self.logger.error(e)
-            for k in net_dict:
-                net_dict[k]["status"] = "VIM_ERROR"
-                net_dict[k]["error_msg"] = str(e)
-            return net_dict
-
-    def get_flavor(self, flavor_id):  # Esta correcto
-        """Obtain flavor details from the VIM
-        Returns the flavor dict details {'id':<>, 'name':<>, other vim specific }
-        Raises an exception upon error or if not found
-        """
-        try:
-
-            one = self._new_one_connection()
-            template = one.template.info(int(flavor_id))
-            if template is not None:
-                return {'id': template.ID, 'name': template.NAME}
-            raise vimconn.vimconnNotFoundException("Flavor {} not found".format(flavor_id))
-        except Exception as e:
-            self.logger.error("get flavor " + str(flavor_id) + " error: " + str(e))
-            raise vimconn.vimconnException(e)
-
-    def new_flavor(self, flavor_data):
-        """Adds a tenant flavor to VIM
-            flavor_data contains a dictionary with information, keys:
-                name: flavor name
-                ram: memory (cloud type) in MBytes
-                vpcus: cpus (cloud type)
-                extended: EPA parameters
-                  - numas: #items requested in same NUMA
-                        memory: number of 1G huge pages memory
-                        paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads
-                        interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
-                          - name: interface name
-                            dedicated: yes|no|yes:sriov;  for PT, SRIOV or only one SRIOV for the physical NIC
-                            bandwidth: X Gbps; requested guarantee bandwidth
-                            vpci: requested virtual PCI address
-                disk: disk size
-                is_public:
-                 #TODO to concrete
-        Returns the flavor identifier"""
-
-        disk_size = str(int(flavor_data["disk"])*1024)
-
-        try:
-            one = self._new_one_connection()
-            template_id = one.template.allocate({
-                'TEMPLATE': {
-                    'NAME': flavor_data["name"],
-                    'CPU': flavor_data["vcpus"],
-                    'VCPU': flavor_data["vcpus"],
-                    'MEMORY': flavor_data["ram"],
-                    'DISK': {
-                        'SIZE': disk_size
-                    },
-                    'CONTEXT': {
-                        'NETWORK': "YES",
-                        'SSH_PUBLIC_KEY': '$USER[SSH_PUBLIC_KEY]'
-                    },
-                    'GRAPHICS': {
-                        'LISTEN': '0.0.0.0',
-                        'TYPE': 'VNC'
-                    },
-                    'CLUSTER_ID': self.config["cluster"]["id"]
-                }
-            })
-            return template_id
-
-        except Exception as e:
-            self.logger.error("Create new flavor error: " + str(e))
-            raise vimconn.vimconnException(e)
-
-    def delete_flavor(self, flavor_id):
-        """ Deletes a tenant flavor from VIM
-            Returns the old flavor_id
-        """
-        try:
-            one = self._new_one_connection()
-            one.template.delete(int(flavor_id), False)
-            return flavor_id
-        except Exception as e:
-            self.logger.error("Error deleting flavor " + str(flavor_id) + ". Flavor not found")
-            raise vimconn.vimconnException(e)
-
-    def get_image_list(self, filter_dict={}):
-        """Obtain tenant images from VIM
-        Filter_dict can be:
-            name: image name
-            id: image uuid
-            checksum: image checksum
-            location: image path
-        Returns the image list of dictionaries:
-            [{<the fields at Filter_dict plus some VIM specific>}, ...]
-            List can be empty
-        """
-        try:
-            one = self._new_one_connection()
-            image_pool = one.imagepool.info(-2, -1, -1).IMAGE
-            images = []
-            if "name" in filter_dict.keys():
-                image_name_filter = filter_dict["name"]
-            else:
-                image_name_filter = None
-            if "id" in filter_dict.keys():
-                image_id_filter = filter_dict["id"]
-            else:
-                image_id_filter = None
-            for image in image_pool:
-                if str(image_name_filter) == str(image.NAME) or str(image.ID) == str(image_id_filter):
-                    images_dict = {"name": image.NAME, "id": str(image.ID)}
-                    images.append(images_dict)
-            return images
-        except Exception as e:
-            self.logger.error("Get image list error: " + str(e))
-            raise vimconn.vimconnException(e)
-
-    def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None, disk_list=None,
-                       availability_zone_index=None, availability_zone_list=None):
-
-        """Adds a VM instance to VIM
-            Params:
-                'start': (boolean) indicates if VM must start or created in pause mode.
-                'image_id','flavor_id': image and flavor VIM id to use for the VM
-                'net_list': list of interfaces, each one is a dictionary with:
-                    'name': (optional) name for the interface.
-                    'net_id': VIM network id where this interface must be connect to. Mandatory for type==virtual
-                    'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities
-                    'model': (optional and only have sense for type==virtual) interface model: virtio, e1000, ...
-                    'mac_address': (optional) mac address to assign to this interface
-                    'ip_address': (optional) IP address to assign to this interface
-                    #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type if VF and net_id is not provided,
-                        the VLAN tag to be used. In case net_id is provided, the internal network vlan is used for tagging VF
-                    'type': (mandatory) can be one of:
-                        'virtual', in this case always connected to a network of type 'net_type=bridge'
-                        'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp network ot it
-                            can created unconnected
-                        'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
-                        'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
-                                are allocated on the same physical NIC
-                    'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
-                    'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing
-                                    or True, it must apply the default VIM behaviour
-                    After execution the method will add the key:
-                    'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this
-                            interface. 'net_list' is modified
-                'cloud_config': (optional) dictionary with:
-                    'key-pairs': (optional) list of strings with the public key to be inserted to the default user
-                    'users': (optional) list of users to be inserted, each item is a dict with:
-                        'name': (mandatory) user name,
-                        'key-pairs': (optional) list of strings with the public key to be inserted to the user
-                    'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
-                        or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
-                    'config-files': (optional). List of files to be transferred. Each item is a dict with:
-                        'dest': (mandatory) string with the destination absolute path
-                        'encoding': (optional, by default text). Can be one of:
-                            'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
-                        'content' (mandatory): string with the content of the file
-                        'permissions': (optional) string with file permissions, typically octal notation '0644'
-                        'owner': (optional) file owner, string with the format 'owner:group'
-                    'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
-                'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
-                    'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
-                    'size': (mandatory) string with the size of the disk in GB
-                availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
-                availability_zone_list: list of availability zones given by user in the VNFD descriptor.  Ignore if
-                    availability_zone_index is None
-            Returns a tuple with the instance identifier and created_items or raises an exception on error
-                created_items can be None or a dictionary where this method can include key-values that will be passed to
-                the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
-                Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
-                as not present.
-            """
-        self.logger.debug(
-            "new_vminstance input: image='{}' flavor='{}' nics='{}'".format(image_id, flavor_id, str(net_list)))
-        try:
-            one = self._new_one_connection()
-            template_vim = one.template.info(int(flavor_id), True)
-            disk_size = str(template_vim.TEMPLATE["DISK"]["SIZE"])
-
-            one = self._new_one_connection()
-            template_updated = ""
-            for net in net_list:
-                net_in_vim = one.vn.info(int(net["net_id"]))
-                net["vim_id"] = str(net_in_vim.ID)
-                network = 'NIC = [NETWORK = "{}",NETWORK_UNAME = "{}" ]'.format(
-                    net_in_vim.NAME, net_in_vim.UNAME)
-                template_updated += network
-
-            template_updated += "DISK = [ IMAGE_ID = {},\n  SIZE = {}]".format(image_id, disk_size)
-
-            if isinstance(cloud_config, dict):
-                if cloud_config.get("key-pairs"):
-                    context = 'CONTEXT = [\n  NETWORK = "YES",\n  SSH_PUBLIC_KEY = "'
-                    for key in cloud_config["key-pairs"]:
-                        context += key + '\n'
-                    # if False:
-                    #     context += '"\n  USERNAME = '
-                    context += '"]'
-                    template_updated += context
-
-            vm_instance_id = one.template.instantiate(int(flavor_id), name, False, template_updated)
-            self.logger.info(
-                "Instanciating in OpenNebula a new VM name:{} id:{}".format(name, flavor_id))
-            return str(vm_instance_id), None
-        except pyone.OneNoExistsException as e:
-            self.logger.error("Network with id " + str(e) + " not found: " + str(e))
-            raise vimconn.vimconnNotFoundException(e)
-        except Exception as e:
-            self.logger.error("Create new vm instance error: " + str(e))
-            raise vimconn.vimconnException(e)
-
-    def get_vminstance(self, vm_id):
-        """Returns the VM instance information from VIM"""
-        try:
-            one = self._new_one_connection()
-            vm = one.vm.info(int(vm_id))
-            return vm
-        except Exception as e:
-            self.logger.error("Getting vm instance error: " + str(e) + ": VM Instance not found")
-            raise vimconn.vimconnException(e)
-
-    def delete_vminstance(self, vm_id, created_items=None):
-        """
-        Removes a VM instance from VIM and its associated elements
-        :param vm_id: VIM identifier of the VM, provided by method new_vminstance
-        :param created_items: dictionary with extra items to be deleted. provided by method new_vminstance and/or method
-            action_vminstance
-        :return: None or the same vm_id. Raises an exception on fail
-        """
-        try:
-            one = self._new_one_connection()
-            one.vm.recover(int(vm_id), 3)
-            vm = None
-            while True:
-                if vm is not None and vm.LCM_STATE == 0:
-                    break
-                else:
-                    vm = one.vm.info(int(vm_id))
-
-        except pyone.OneNoExistsException as e:
-            self.logger.info("The vm " + str(vm_id) + " does not exist or is already deleted")
-            raise vimconn.vimconnNotFoundException("The vm {} does not exist or is already deleted".format(vm_id))
-        except Exception as e:
-            self.logger.error("Delete vm instance " + str(vm_id) + " error: " + str(e))
-            raise vimconn.vimconnException(e)
-
-    def refresh_vms_status(self, vm_list):
-        """Get the status of the virtual machines and their interfaces/ports
-           Params: the list of VM identifiers
-           Returns a dictionary with:
-                vm_id:          #VIM id of this Virtual Machine
-                    status:     #Mandatory. Text with one of:
-                                #  DELETED (not found at vim)
-                                #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
-                                #  OTHER (Vim reported other status not understood)
-                                #  ERROR (VIM indicates an ERROR status)
-                                #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
-                                #  BUILD (on building process), ERROR
-                                #  ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
-                                #
-                    error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
-                    vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
-                    interfaces: list with interface info. Each item a dictionary with:
-                        vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
-                        mac_address:      #Text format XX:XX:XX:XX:XX:XX
-                        vim_net_id:       #network id where this interface is connected, if provided at creation
-                        vim_interface_id: #interface/port VIM id
-                        ip_address:       #null, or text with IPv4, IPv6 address
-                        compute_node:     #identification of compute node where PF,VF interface is allocated
-                        pci:              #PCI address of the NIC that hosts the PF,VF
-                        vlan:             #physical VLAN used for VF
-        """
-        vm_dict = {}
-        try:
-            for vm_id in vm_list:
-                vm = {}
-                if self.get_vminstance(vm_id) is not None:
-                    vm_element = self.get_vminstance(vm_id)
-                else:
-                    self.logger.info("The vm " + str(vm_id) + " does not exist.")
-                    vm['status'] = "DELETED"
-                    vm['error_msg'] = ("The vm " + str(vm_id) + " does not exist.")
-                    continue
-                vm["vim_info"] = None
-                vm_status = vm_element.LCM_STATE
-                if vm_status == 3:
-                    vm['status'] = "ACTIVE"
-                elif vm_status == 36:
-                    vm['status'] = "ERROR"
-                    vm['error_msg'] = "VM failure"
-                else:
-                    vm['status'] = "BUILD"
-
-                if vm_element is not None:
-                    interfaces = self._get_networks_vm(vm_element)
-                    vm["interfaces"] = interfaces
-                vm_dict[vm_id] = vm
-            return vm_dict
-        except Exception as e:
-            self.logger.error(e)
-            for k in vm_dict:
-                vm_dict[k]["status"] = "VIM_ERROR"
-                vm_dict[k]["error_msg"] = str(e)
-            return vm_dict
-
-    def _get_networks_vm(self, vm_element):
-        interfaces = []
-        try:
-            if isinstance(vm_element.TEMPLATE["NIC"], list):
-                for net in vm_element.TEMPLATE["NIC"]:
-                    interface = {'vim_info': None, "mac_address": str(net["MAC"]), "vim_net_id": str(net["NETWORK_ID"]),
-                                 "vim_interface_id": str(net["NETWORK_ID"])}
-                    # maybe it should be 2 different keys for ip_address if an interface has ipv4 and ipv6
-                    if u'IP' in net:
-                        interface["ip_address"] = str(net["IP"])
-                    if u'IP6_GLOBAL' in net:
-                        interface["ip_address"] = str(net["IP6_GLOBAL"])
-                    interfaces.append(interface)
-            else:
-                net = vm_element.TEMPLATE["NIC"]
-                interface = {'vim_info': None, "mac_address": str(net["MAC"]), "vim_net_id": str(net["NETWORK_ID"]),
-                             "vim_interface_id": str(net["NETWORK_ID"])}
-                # maybe it should be 2 different keys for ip_address if an interface has ipv4 and ipv6
-                if u'IP' in net:
-                    interface["ip_address"] = str(net["IP"])
-                if u'IP6_GLOBAL' in net:
-                    interface["ip_address"] = str(net["IP6_GLOBAL"])
-                interfaces.append(interface)
-            return interfaces
-        except Exception as e:
-            self.logger.error("Error getting vm interface_information of vm_id: " + str(vm_element.ID))
diff --git a/osm_ro/vimconn_openstack.py b/osm_ro/vimconn_openstack.py
deleted file mode 100644 (file)
index 4a897a3..0000000
+++ /dev/null
@@ -1,2239 +0,0 @@
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-'''
-osconnector implements all the methods to interact with openstack using the python-neutronclient.
-
-For the VNF forwarding graph, The OpenStack VIM connector calls the
-networking-sfc Neutron extension methods, whose resources are mapped
-to the VIM connector's SFC resources as follows:
-- Classification (OSM) -> Flow Classifier (Neutron)
-- Service Function Instance (OSM) -> Port Pair (Neutron)
-- Service Function (OSM) -> Port Pair Group (Neutron)
-- Service Function Path (OSM) -> Port Chain (Neutron)
-'''
-__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
-__date__  = "$22-sep-2017 23:59:59$"
-
-import vimconn
-# import json
-import logging
-import netaddr
-import time
-import yaml
-import random
-import re
-import copy
-from pprint import pformat
-from types import StringTypes
-
-from novaclient import client as nClient, exceptions as nvExceptions
-from keystoneauth1.identity import v2, v3
-from keystoneauth1 import session
-import keystoneclient.exceptions as ksExceptions
-import keystoneclient.v3.client as ksClient_v3
-import keystoneclient.v2_0.client as ksClient_v2
-from glanceclient import client as glClient
-import glanceclient.exc as gl1Exceptions
-from  cinderclient import client as cClient
-from httplib import HTTPException
-from neutronclient.neutron import client as neClient
-from neutronclient.common import exceptions as neExceptions
-from requests.exceptions import ConnectionError
-
-
-"""contain the openstack virtual machine status to openmano status"""
-vmStatus2manoFormat={'ACTIVE':'ACTIVE',
-                     'PAUSED':'PAUSED',
-                     'SUSPENDED': 'SUSPENDED',
-                     'SHUTOFF':'INACTIVE',
-                     'BUILD':'BUILD',
-                     'ERROR':'ERROR','DELETED':'DELETED'
-                     }
-netStatus2manoFormat={'ACTIVE':'ACTIVE','PAUSED':'PAUSED','INACTIVE':'INACTIVE','BUILD':'BUILD','ERROR':'ERROR','DELETED':'DELETED'
-                     }
-
-supportedClassificationTypes = ['legacy_flow_classifier']
-
-#global var to have a timeout creating and deleting volumes
-volume_timeout = 600
-server_timeout = 600
-
-
-class SafeDumper(yaml.SafeDumper):
-    def represent_data(self, data):
-        # Openstack APIs use custom subclasses of dict and YAML safe dumper
-        # is designed to not handle that (reference issue 142 of pyyaml)
-        if isinstance(data, dict) and data.__class__ != dict:
-            # A simple solution is to convert those items back to dicts
-            data = dict(data.items())
-
-        return super(SafeDumper, self).represent_data(data)
-
-
-class vimconnector(vimconn.vimconnector):
-    def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None,
-                 log_level=None, config={}, persistent_info={}):
-        '''using common constructor parameters. In this case
-        'url' is the keystone authorization url,
-        'url_admin' is not use
-        '''
-        api_version = config.get('APIversion')
-        if api_version and api_version not in ('v3.3', 'v2.0', '2', '3'):
-            raise vimconn.vimconnException("Invalid value '{}' for config:APIversion. "
-                                           "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version))
-        vim_type = config.get('vim_type')
-        if vim_type and vim_type not in ('vio', 'VIO'):
-            raise vimconn.vimconnException("Invalid value '{}' for config:vim_type."
-                            "Allowed values are 'vio' or 'VIO'".format(vim_type))
-
-        if config.get('dataplane_net_vlan_range') is not None:
-            #validate vlan ranges provided by user
-            self._validate_vlan_ranges(config.get('dataplane_net_vlan_range'), 'dataplane_net_vlan_range')
-
-        if config.get('multisegment_vlan_range') is not None:
-            #validate vlan ranges provided by user
-            self._validate_vlan_ranges(config.get('multisegment_vlan_range'), 'multisegment_vlan_range')
-
-        vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level,
-                                      config)
-
-        if self.config.get("insecure") and self.config.get("ca_cert"):
-            raise vimconn.vimconnException("options insecure and ca_cert are mutually exclusive")
-        self.verify = True
-        if self.config.get("insecure"):
-            self.verify = False
-        if self.config.get("ca_cert"):
-            self.verify = self.config.get("ca_cert")
-
-        if not url:
-            raise TypeError('url param can not be NoneType')
-        self.persistent_info = persistent_info
-        self.availability_zone = persistent_info.get('availability_zone', None)
-        self.session = persistent_info.get('session', {'reload_client': True})
-        self.my_tenant_id = self.session.get('my_tenant_id')
-        self.nova = self.session.get('nova')
-        self.neutron = self.session.get('neutron')
-        self.cinder = self.session.get('cinder')
-        self.glance = self.session.get('glance')
-        # self.glancev1 = self.session.get('glancev1')
-        self.keystone = self.session.get('keystone')
-        self.api_version3 = self.session.get('api_version3')
-        self.vim_type = self.config.get("vim_type")
-        if self.vim_type:
-            self.vim_type = self.vim_type.upper()
-        if self.config.get("use_internal_endpoint"):
-            self.endpoint_type = "internalURL"
-        else:
-            self.endpoint_type = None
-
-        self.logger = logging.getLogger('openmano.vim.openstack')
-
-        # allow security_groups to be a list or a single string
-        if isinstance(self.config.get('security_groups'), str):
-            self.config['security_groups'] = [self.config['security_groups']]
-        self.security_groups_id = None
-
-        ####### VIO Specific Changes #########
-        if self.vim_type == "VIO":
-            self.logger = logging.getLogger('openmano.vim.vio')
-
-        if log_level:
-            self.logger.setLevel( getattr(logging, log_level))
-
-    def __getitem__(self, index):
-        """Get individuals parameters.
-        Throw KeyError"""
-        if index == 'project_domain_id':
-            return self.config.get("project_domain_id")
-        elif index == 'user_domain_id':
-            return self.config.get("user_domain_id")
-        else:
-            return vimconn.vimconnector.__getitem__(self, index)
-
-    def __setitem__(self, index, value):
-        """Set individuals parameters and it is marked as dirty so to force connection reload.
-        Throw KeyError"""
-        if index == 'project_domain_id':
-            self.config["project_domain_id"] = value
-        elif index == 'user_domain_id':
-                self.config["user_domain_id"] = value
-        else:
-            vimconn.vimconnector.__setitem__(self, index, value)
-        self.session['reload_client'] = True
-
-    def serialize(self, value):
-        """Serialization of python basic types.
-
-        In the case value is not serializable a message will be logged and a
-        simple representation of the data that cannot be converted back to
-        python is returned.
-        """
-        if isinstance(value, StringTypes):
-            return value
-
-        try:
-            return yaml.dump(value, Dumper=SafeDumper,
-                             default_flow_style=True, width=256)
-        except yaml.representer.RepresenterError:
-                self.logger.debug(
-                    'The following entity cannot be serialized in YAML:'
-                    '\n\n%s\n\n', pformat(value), exc_info=True)
-                return str(value)
-
-    def _reload_connection(self):
-        '''Called before any operation, it check if credentials has changed
-        Throw keystoneclient.apiclient.exceptions.AuthorizationFailure
-        '''
-        #TODO control the timing and possible token timeout, but it seams that python client does this task for us :-)
-        if self.session['reload_client']:
-            if self.config.get('APIversion'):
-                self.api_version3 = self.config['APIversion'] == 'v3.3' or self.config['APIversion'] == '3'
-            else:  # get from ending auth_url that end with v3 or with v2.0
-                self.api_version3 =  self.url.endswith("/v3") or self.url.endswith("/v3/")
-            self.session['api_version3'] = self.api_version3
-            if self.api_version3:
-                if self.config.get('project_domain_id') or self.config.get('project_domain_name'):
-                    project_domain_id_default = None
-                else:
-                    project_domain_id_default = 'default'
-                if self.config.get('user_domain_id') or self.config.get('user_domain_name'):
-                    user_domain_id_default = None
-                else:
-                    user_domain_id_default = 'default'
-                auth = v3.Password(auth_url=self.url,
-                                   username=self.user,
-                                   password=self.passwd,
-                                   project_name=self.tenant_name,
-                                   project_id=self.tenant_id,
-                                   project_domain_id=self.config.get('project_domain_id', project_domain_id_default),
-                                   user_domain_id=self.config.get('user_domain_id', user_domain_id_default),
-                                   project_domain_name=self.config.get('project_domain_name'),
-                                   user_domain_name=self.config.get('user_domain_name'))
-            else:
-                auth = v2.Password(auth_url=self.url,
-                                   username=self.user,
-                                   password=self.passwd,
-                                   tenant_name=self.tenant_name,
-                                   tenant_id=self.tenant_id)
-            sess = session.Session(auth=auth, verify=self.verify)
-            # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River Titanium cloud and StarlingX
-            region_name = self.config.get('region_name')
-            if self.api_version3:
-                self.keystone = ksClient_v3.Client(session=sess, endpoint_type=self.endpoint_type, region_name=region_name)
-            else:
-                self.keystone = ksClient_v2.Client(session=sess, endpoint_type=self.endpoint_type)
-            self.session['keystone'] = self.keystone
-            # In order to enable microversion functionality an explicit microversion must be specified in 'config'.
-            # This implementation approach is due to the warning message in
-            # https://developer.openstack.org/api-guide/compute/microversions.html
-            # where it is stated that microversion backwards compatibility is not guaranteed and clients should
-            # always require an specific microversion.
-            # To be able to use 'device role tagging' functionality define 'microversion: 2.32' in datacenter config
-            version = self.config.get("microversion")
-            if not version:
-                version = "2.1"
-            # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River Titanium cloud and StarlingX
-            self.nova = self.session['nova'] = nClient.Client(str(version), session=sess, endpoint_type=self.endpoint_type, region_name=region_name)
-            self.neutron = self.session['neutron'] = neClient.Client('2.0', session=sess, endpoint_type=self.endpoint_type, region_name=region_name)
-            self.cinder = self.session['cinder'] = cClient.Client(2, session=sess, endpoint_type=self.endpoint_type, region_name=region_name)
-            try:
-                self.my_tenant_id = self.session['my_tenant_id'] = sess.get_project_id()
-            except Exception as e:
-                self.logger.error("Cannot get project_id from session", exc_info=True)
-            if self.endpoint_type == "internalURL":
-                glance_service_id = self.keystone.services.list(name="glance")[0].id
-                glance_endpoint = self.keystone.endpoints.list(glance_service_id, interface="internal")[0].url
-            else:
-                glance_endpoint = None
-            self.glance = self.session['glance'] = glClient.Client(2, session=sess, endpoint=glance_endpoint)
-            # using version 1 of glance client in new_image()
-            # self.glancev1 = self.session['glancev1'] = glClient.Client('1', session=sess,
-            #                                                            endpoint=glance_endpoint)
-            self.session['reload_client'] = False
-            self.persistent_info['session'] = self.session
-            # add availablity zone info inside  self.persistent_info
-            self._set_availablity_zones()
-            self.persistent_info['availability_zone'] = self.availability_zone
-            self.security_groups_id = None  # force to get again security_groups_ids next time they are needed
-
-    def __net_os2mano(self, net_list_dict):
-        '''Transform the net openstack format to mano format
-        net_list_dict can be a list of dict or a single dict'''
-        if type(net_list_dict) is dict:
-            net_list_=(net_list_dict,)
-        elif type(net_list_dict) is list:
-            net_list_=net_list_dict
-        else:
-            raise TypeError("param net_list_dict must be a list or a dictionary")
-        for net in net_list_:
-            if net.get('provider:network_type') == "vlan":
-                net['type']='data'
-            else:
-                net['type']='bridge'
-
-    def __classification_os2mano(self, class_list_dict):
-        """Transform the openstack format (Flow Classifier) to mano format
-        (Classification) class_list_dict can be a list of dict or a single dict
-        """
-        if isinstance(class_list_dict, dict):
-            class_list_ = [class_list_dict]
-        elif isinstance(class_list_dict, list):
-            class_list_ = class_list_dict
-        else:
-            raise TypeError(
-                "param class_list_dict must be a list or a dictionary")
-        for classification in class_list_:
-            id = classification.pop('id')
-            name = classification.pop('name')
-            description = classification.pop('description')
-            project_id = classification.pop('project_id')
-            tenant_id = classification.pop('tenant_id')
-            original_classification = copy.deepcopy(classification)
-            classification.clear()
-            classification['ctype'] = 'legacy_flow_classifier'
-            classification['definition'] = original_classification
-            classification['id'] = id
-            classification['name'] = name
-            classification['description'] = description
-            classification['project_id'] = project_id
-            classification['tenant_id'] = tenant_id
-
-    def __sfi_os2mano(self, sfi_list_dict):
-        """Transform the openstack format (Port Pair) to mano format (SFI)
-        sfi_list_dict can be a list of dict or a single dict
-        """
-        if isinstance(sfi_list_dict, dict):
-            sfi_list_ = [sfi_list_dict]
-        elif isinstance(sfi_list_dict, list):
-            sfi_list_ = sfi_list_dict
-        else:
-            raise TypeError(
-                "param sfi_list_dict must be a list or a dictionary")
-        for sfi in sfi_list_:
-            sfi['ingress_ports'] = []
-            sfi['egress_ports'] = []
-            if sfi.get('ingress'):
-                sfi['ingress_ports'].append(sfi['ingress'])
-            if sfi.get('egress'):
-                sfi['egress_ports'].append(sfi['egress'])
-            del sfi['ingress']
-            del sfi['egress']
-            params = sfi.get('service_function_parameters')
-            sfc_encap = False
-            if params:
-                correlation = params.get('correlation')
-                if correlation:
-                    sfc_encap = True
-            sfi['sfc_encap'] = sfc_encap
-            del sfi['service_function_parameters']
-
-    def __sf_os2mano(self, sf_list_dict):
-        """Transform the openstack format (Port Pair Group) to mano format (SF)
-        sf_list_dict can be a list of dict or a single dict
-        """
-        if isinstance(sf_list_dict, dict):
-            sf_list_ = [sf_list_dict]
-        elif isinstance(sf_list_dict, list):
-            sf_list_ = sf_list_dict
-        else:
-            raise TypeError(
-                "param sf_list_dict must be a list or a dictionary")
-        for sf in sf_list_:
-            del sf['port_pair_group_parameters']
-            sf['sfis'] = sf['port_pairs']
-            del sf['port_pairs']
-
-    def __sfp_os2mano(self, sfp_list_dict):
-        """Transform the openstack format (Port Chain) to mano format (SFP)
-        sfp_list_dict can be a list of dict or a single dict
-        """
-        if isinstance(sfp_list_dict, dict):
-            sfp_list_ = [sfp_list_dict]
-        elif isinstance(sfp_list_dict, list):
-            sfp_list_ = sfp_list_dict
-        else:
-            raise TypeError(
-                "param sfp_list_dict must be a list or a dictionary")
-        for sfp in sfp_list_:
-            params = sfp.pop('chain_parameters')
-            sfc_encap = False
-            if params:
-                correlation = params.get('correlation')
-                if correlation:
-                    sfc_encap = True
-            sfp['sfc_encap'] = sfc_encap
-            sfp['spi'] = sfp.pop('chain_id')
-            sfp['classifications'] = sfp.pop('flow_classifiers')
-            sfp['service_functions'] = sfp.pop('port_pair_groups')
-
-    # placeholder for now; read TODO note below
-    def _validate_classification(self, type, definition):
-        # only legacy_flow_classifier Type is supported at this point
-        return True
-        # TODO(igordcard): this method should be an abstract method of an
-        # abstract Classification class to be implemented by the specific
-        # Types. Also, abstract vimconnector should call the validation
-        # method before the implemented VIM connectors are called.
-
-    def _format_exception(self, exception):
-        '''Transform a keystone, nova, neutron  exception into a vimconn exception'''
-
-        # Fixing bug 665 https://osm.etsi.org/bugzilla/show_bug.cgi?id=665
-        # There are some openstack versions that message error are unicode with non English
-        message_error = exception.message
-        if isinstance(message_error, unicode):
-            message_error = message_error.encode("utf")
-
-        if isinstance(exception, (neExceptions.NetworkNotFoundClient, nvExceptions.NotFound, ksExceptions.NotFound,
-                                  gl1Exceptions.HTTPNotFound)):
-            raise vimconn.vimconnNotFoundException(type(exception).__name__ + ": " + message_error)
-        elif isinstance(exception, (HTTPException, gl1Exceptions.HTTPException, gl1Exceptions.CommunicationError,
-                               ConnectionError, ksExceptions.ConnectionError, neExceptions.ConnectionFailed)):
-            raise vimconn.vimconnConnectionException(type(exception).__name__ + ": " + message_error)
-        elif isinstance(exception,  (KeyError, nvExceptions.BadRequest, ksExceptions.BadRequest)):
-            raise vimconn.vimconnException(type(exception).__name__ + ": " + message_error)
-        elif isinstance(exception, (nvExceptions.ClientException, ksExceptions.ClientException,
-                                    neExceptions.NeutronException)):
-            raise vimconn.vimconnUnexpectedResponse(type(exception).__name__ + ": " + message_error)
-        elif isinstance(exception, nvExceptions.Conflict):
-            raise vimconn.vimconnConflictException(type(exception).__name__ + ": " + message_error)
-        elif isinstance(exception, vimconn.vimconnException):
-            raise exception
-        else:  # ()
-            self.logger.error("General Exception " + message_error, exc_info=True)
-            raise vimconn.vimconnConnectionException(type(exception).__name__ + ": " + message_error)
-
-    def _get_ids_from_name(self):
-        """
-         Obtain ids from name of tenant and security_groups. Store at self .security_groups_id"
-        :return: None
-        """
-        # get tenant_id if only tenant_name is supplied
-        self._reload_connection()
-        if not self.my_tenant_id:
-            raise vimconn.vimconnConnectionException("Error getting tenant information from name={} id={}".
-                                                     format(self.tenant_name, self.tenant_id))
-        if self.config.get('security_groups') and not self.security_groups_id:
-            # convert from name to id
-            neutron_sg_list = self.neutron.list_security_groups(tenant_id=self.my_tenant_id)["security_groups"]
-
-            self.security_groups_id = []
-            for sg in self.config.get('security_groups'):
-                for neutron_sg in neutron_sg_list:
-                    if sg in (neutron_sg["id"], neutron_sg["name"]):
-                        self.security_groups_id.append(neutron_sg["id"])
-                        break
-                else:
-                    self.security_groups_id = None
-                    raise vimconn.vimconnConnectionException("Not found security group {} for this tenant".format(sg))
-
-    def check_vim_connectivity(self):
-        # just get network list to check connectivity and credentials
-        self.get_network_list(filter_dict={})
-
-    def get_tenant_list(self, filter_dict={}):
-        '''Obtain tenants of VIM
-        filter_dict can contain the following keys:
-            name: filter by tenant name
-            id: filter by tenant uuid/id
-            <other VIM specific>
-        Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
-        '''
-        self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))
-        try:
-            self._reload_connection()
-            if self.api_version3:
-                project_class_list = self.keystone.projects.list(name=filter_dict.get("name"))
-            else:
-                project_class_list = self.keystone.tenants.findall(**filter_dict)
-            project_list=[]
-            for project in project_class_list:
-                if filter_dict.get('id') and filter_dict["id"] != project.id:
-                    continue
-                project_list.append(project.to_dict())
-            return project_list
-        except (ksExceptions.ConnectionError, ksExceptions.ClientException, ConnectionError) as e:
-            self._format_exception(e)
-
-    def new_tenant(self, tenant_name, tenant_description):
-        '''Adds a new tenant to openstack VIM. Returns the tenant identifier'''
-        self.logger.debug("Adding a new tenant name: %s", tenant_name)
-        try:
-            self._reload_connection()
-            if self.api_version3:
-                project = self.keystone.projects.create(tenant_name, self.config.get("project_domain_id", "default"),
-                                                        description=tenant_description, is_domain=False)
-            else:
-                project = self.keystone.tenants.create(tenant_name, tenant_description)
-            return project.id
-        except (ksExceptions.ConnectionError, ksExceptions.ClientException, ksExceptions.BadRequest, ConnectionError)  as e:
-            self._format_exception(e)
-
-    def delete_tenant(self, tenant_id):
-        '''Delete a tenant from openstack VIM. Returns the old tenant identifier'''
-        self.logger.debug("Deleting tenant %s from VIM", tenant_id)
-        try:
-            self._reload_connection()
-            if self.api_version3:
-                self.keystone.projects.delete(tenant_id)
-            else:
-                self.keystone.tenants.delete(tenant_id)
-            return tenant_id
-        except (ksExceptions.ConnectionError, ksExceptions.ClientException, ksExceptions.NotFound, ConnectionError)  as e:
-            self._format_exception(e)
-
    def new_network(self,net_name, net_type, ip_profile=None, shared=False, vlan=None):
        """Adds a tenant network to VIM
        Params:
            'net_name': name of the network
            'net_type': one of:
                'bridge': overlay isolated network
                'data':   underlay E-LAN network for Passthrough and SRIOV interfaces
                'ptp':    underlay E-LINE network for Passthrough and SRIOV interfaces.
            'ip_profile': is a dict containing the IP parameters of the network
                'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
                'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
                'gateway_address': (Optional) ip_schema, that is X.X.X.X
                'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
                'dhcp_enabled': True or False
                'dhcp_start_address': ip_schema, first IP to grant
                'dhcp_count': number of IPs to grant.
            'shared': if this network can be seen/use by other tenants/organization
            'vlan': in case of a data or ptp net_type, the intended vlan tag to be used for the network
        Returns a tuple with the network identifier and created_items, or raises an exception on error
            created_items can be None or a dictionary where this method can include key-values that will be passed to
            the method delete_network. Can be used to store created segments, created l2gw connections, etc.
            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
            as not present.
        """
        self.logger.debug("Adding a new network to VIM name '%s', type '%s'", net_name, net_type)
        # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))
        try:
            new_net = None
            created_items = {}
            self._reload_connection()
            network_dict = {'name': net_name, 'admin_state_up': True}
            if net_type=="data" or net_type=="ptp":
                # dataplane (SRIOV/passthrough) networks require a provider physical network
                if self.config.get('dataplane_physical_net') == None:
                    raise vimconn.vimconnConflictException("You must provide a 'dataplane_physical_net' at config value before creating sriov network")
                if not self.config.get('multisegment_support'):
                    network_dict["provider:physical_network"] = self.config[
                        'dataplane_physical_net']  # "physnet_sriov" #TODO physical
                    network_dict["provider:network_type"] = "vlan"
                    if vlan!=None:
                        # NOTE(review): this stores the vlan NUMBER as the network_type string;
                        # it looks like it should set "provider:segmentation_id" instead - confirm
                        network_dict["provider:network_type"] = vlan
                else:
                    ###### Multi-segment case ######
                    # first segment: vxlan overlay; second segment: vlan on the dataplane physical net
                    segment_list = []
                    segment1_dict = {}
                    segment1_dict["provider:physical_network"] = ''
                    segment1_dict["provider:network_type"]     = 'vxlan'
                    segment_list.append(segment1_dict)
                    segment2_dict = {}
                    segment2_dict["provider:physical_network"] = self.config['dataplane_physical_net']
                    segment2_dict["provider:network_type"]     = "vlan"
                    if self.config.get('multisegment_vlan_range'):
                        vlanID = self._generate_multisegment_vlanID()
                        segment2_dict["provider:segmentation_id"] = vlanID
                    # else
                    #     raise vimconn.vimconnConflictException(
                    #         "You must provide 'multisegment_vlan_range' at config dict before creating a multisegment network")
                    segment_list.append(segment2_dict)
                    network_dict["segments"] = segment_list

                ####### VIO Specific Changes #########
                if self.vim_type == "VIO":
                    # VIO requires an explicit vlan tag; use the given one or generate it
                    if vlan is not None:
                        network_dict["provider:segmentation_id"] = vlan
                    else:
                        if self.config.get('dataplane_net_vlan_range') is None:
                            raise vimconn.vimconnConflictException("You must provide "\
                                "'dataplane_net_vlan_range' in format [start_ID - end_ID]"\
                                "at config value before creating sriov network with vlan tag")

                        network_dict["provider:segmentation_id"] = self._generate_vlanID()

            network_dict["shared"] = shared
            if self.config.get("disable_network_port_security"):
                network_dict["port_security_enabled"] = False
            new_net = self.neutron.create_network({'network':network_dict})
            # print new_net
            # create subnetwork, even if there is no profile
            if not ip_profile:
                ip_profile = {}
            if not ip_profile.get('subnet_address'):
                #Fake subnet is required
                subnet_rand = random.randint(0, 255)
                ip_profile['subnet_address'] = "192.168.{}.0/24".format(subnet_rand)
            if 'ip_version' not in ip_profile:
                ip_profile['ip_version'] = "IPv4"
            subnet = {"name": net_name+"-subnet",
                    "network_id": new_net["network"]["id"],
                    "ip_version": 4 if ip_profile['ip_version']=="IPv4" else 6,
                    "cidr": ip_profile['subnet_address']
                    }
            # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
            if ip_profile.get('gateway_address'):
                subnet['gateway_ip'] = ip_profile['gateway_address']
            else:
                subnet['gateway_ip'] = None
            if ip_profile.get('dns_address'):
                # NOTE(review): docstring says dns_address is comma separated but it is split on ";" - confirm
                subnet['dns_nameservers'] = ip_profile['dns_address'].split(";")
            if 'dhcp_enabled' in ip_profile:
                subnet['enable_dhcp'] = False if \
                    ip_profile['dhcp_enabled']=="false" or ip_profile['dhcp_enabled']==False else True
            if ip_profile.get('dhcp_start_address'):
                subnet['allocation_pools'] = []
                subnet['allocation_pools'].append(dict())
                subnet['allocation_pools'][0]['start'] = ip_profile['dhcp_start_address']
            if ip_profile.get('dhcp_count'):
                #parts = ip_profile['dhcp_start_address'].split('.')
                #ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
                # pool end address = start + count - 1
                ip_int = int(netaddr.IPAddress(ip_profile['dhcp_start_address']))
                ip_int += ip_profile['dhcp_count'] - 1
                ip_str = str(netaddr.IPAddress(ip_int))
                subnet['allocation_pools'][0]['end'] = ip_str
            #self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
            self.neutron.create_subnet({"subnet": subnet} )

            if net_type == "data" and self.config.get('multisegment_support'):
                if self.config.get('l2gw_support'):
                    l2gw_list = self.neutron.list_l2_gateways().get("l2_gateways", ())
                    for l2gw in l2gw_list:
                        l2gw_conn = {}
                        l2gw_conn["l2_gateway_id"] = l2gw["id"]
                        l2gw_conn["network_id"] = new_net["network"]["id"]
                        # NOTE(review): vlanID is only bound above when 'multisegment_vlan_range'
                        # is configured; otherwise this line raises NameError - confirm
                        l2gw_conn["segmentation_id"] = str(vlanID)
                        new_l2gw_conn = self.neutron.create_l2_gateway_connection({"l2_gateway_connection": l2gw_conn})
                        created_items["l2gwconn:" + str(new_l2gw_conn["l2_gateway_connection"]["id"])] = True
            return new_net["network"]["id"], created_items
        except Exception as e:
            #delete l2gw connections (if any) before deleting the network
            for k, v in created_items.items():
                if not v:  # skip already deleted
                    continue
                try:
                    k_item, _, k_id = k.partition(":")
                    if k_item == "l2gwconn":
                        self.neutron.delete_l2_gateway_connection(k_id)
                except Exception as e2:
                    self.logger.error("Error deleting l2 gateway connection: {}: {}".format(type(e2).__name__, e2))
            if new_net:
                self.neutron.delete_network(new_net['network']['id'])
            self._format_exception(e)
-
-    def get_network_list(self, filter_dict={}):
-        '''Obtain tenant networks of VIM
-        Filter_dict can be:
-            name: network name
-            id: network uuid
-            shared: boolean
-            tenant_id: tenant
-            admin_state_up: boolean
-            status: 'ACTIVE'
-        Returns the network list of dictionaries
-        '''
-        self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
-        try:
-            self._reload_connection()
-            filter_dict_os = filter_dict.copy()
-            if self.api_version3 and "tenant_id" in filter_dict_os:
-                filter_dict_os['project_id'] = filter_dict_os.pop('tenant_id')  #T ODO check
-            net_dict = self.neutron.list_networks(**filter_dict_os)
-            net_list = net_dict["networks"]
-            self.__net_os2mano(net_list)
-            return net_list
-        except (neExceptions.ConnectionFailed, ksExceptions.ClientException, neExceptions.NeutronException, ConnectionError) as e:
-            self._format_exception(e)
-
-    def get_network(self, net_id):
-        '''Obtain details of network from VIM
-        Returns the network information from a network id'''
-        self.logger.debug(" Getting tenant network %s from VIM", net_id)
-        filter_dict={"id": net_id}
-        net_list = self.get_network_list(filter_dict)
-        if len(net_list)==0:
-            raise vimconn.vimconnNotFoundException("Network '{}' not found".format(net_id))
-        elif len(net_list)>1:
-            raise vimconn.vimconnConflictException("Found more than one network with this criteria")
-        net = net_list[0]
-        subnets=[]
-        for subnet_id in net.get("subnets", () ):
-            try:
-                subnet = self.neutron.show_subnet(subnet_id)
-            except Exception as e:
-                self.logger.error("osconnector.get_network(): Error getting subnet %s %s" % (net_id, str(e)))
-                subnet = {"id": subnet_id, "fault": str(e)}
-            subnets.append(subnet)
-        net["subnets"] = subnets
-        net["encapsulation"] = net.get('provider:network_type')
-        net["encapsulation_type"] = net.get('provider:network_type')
-        net["segmentation_id"] = net.get('provider:segmentation_id')
-        net["encapsulation_id"] = net.get('provider:segmentation_id')
-        return net
-
-    def delete_network(self, net_id, created_items=None):
-        """
-        Removes a tenant network from VIM and its associated elements
-        :param net_id: VIM identifier of the network, provided by method new_network
-        :param created_items: dictionary with extra items to be deleted. provided by method new_network
-        Returns the network identifier or raises an exception upon error or when network is not found
-        """
-        self.logger.debug("Deleting network '%s' from VIM", net_id)
-        if created_items == None:
-            created_items = {}
-        try:
-            self._reload_connection()
-            #delete l2gw connections (if any) before deleting the network
-            for k, v in created_items.items():
-                if not v:  # skip already deleted
-                    continue
-                try:
-                    k_item, _, k_id = k.partition(":")
-                    if k_item == "l2gwconn":
-                        self.neutron.delete_l2_gateway_connection(k_id)
-                except Exception as e:
-                    self.logger.error("Error deleting l2 gateway connection: {}: {}".format(type(e).__name__, e))
-            #delete VM ports attached to this networks before the network
-            ports = self.neutron.list_ports(network_id=net_id)
-            for p in ports['ports']:
-                try:
-                    self.neutron.delete_port(p["id"])
-                except Exception as e:
-                    self.logger.error("Error deleting port %s: %s", p["id"], str(e))
-            self.neutron.delete_network(net_id)
-            return net_id
-        except (neExceptions.ConnectionFailed, neExceptions.NetworkNotFoundClient, neExceptions.NeutronException,
-                ksExceptions.ClientException, neExceptions.NeutronException, ConnectionError) as e:
-            self._format_exception(e)
-
-    def refresh_nets_status(self, net_list):
-        '''Get the status of the networks
-           Params: the list of network identifiers
-           Returns a dictionary with:
-                net_id:         #VIM id of this network
-                    status:     #Mandatory. Text with one of:
-                                #  DELETED (not found at vim)
-                                #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
-                                #  OTHER (Vim reported other status not understood)
-                                #  ERROR (VIM indicates an ERROR status)
-                                #  ACTIVE, INACTIVE, DOWN (admin down),
-                                #  BUILD (on building process)
-                                #
-                    error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
-                    vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
-
-        '''
-        net_dict={}
-        for net_id in net_list:
-            net = {}
-            try:
-                net_vim = self.get_network(net_id)
-                if net_vim['status'] in netStatus2manoFormat:
-                    net["status"] = netStatus2manoFormat[ net_vim['status'] ]
-                else:
-                    net["status"] = "OTHER"
-                    net["error_msg"] = "VIM status reported " + net_vim['status']
-
-                if net['status'] == "ACTIVE" and not net_vim['admin_state_up']:
-                    net['status'] = 'DOWN'
-
-                net['vim_info'] = self.serialize(net_vim)
-
-                if net_vim.get('fault'):  #TODO
-                    net['error_msg'] = str(net_vim['fault'])
-            except vimconn.vimconnNotFoundException as e:
-                self.logger.error("Exception getting net status: %s", str(e))
-                net['status'] = "DELETED"
-                net['error_msg'] = str(e)
-            except vimconn.vimconnException as e:
-                self.logger.error("Exception getting net status: %s", str(e))
-                net['status'] = "VIM_ERROR"
-                net['error_msg'] = str(e)
-            net_dict[net_id] = net
-        return net_dict
-
-    def get_flavor(self, flavor_id):
-        '''Obtain flavor details from the  VIM. Returns the flavor dict details'''
-        self.logger.debug("Getting flavor '%s'", flavor_id)
-        try:
-            self._reload_connection()
-            flavor = self.nova.flavors.find(id=flavor_id)
-            #TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
-            return flavor.to_dict()
-        except (nvExceptions.NotFound, nvExceptions.ClientException, ksExceptions.ClientException, ConnectionError) as e:
-            self._format_exception(e)
-
    def get_flavor_id_from_data(self, flavor_dict):
        """Obtain flavor id that match the flavor description
           Returns the flavor_id or raises a vimconnNotFoundException
           flavor_dict: contains the required ram, vcpus, disk
           If 'use_existing_flavors' is set to True at config, the closer flavor that provides same or more ram, vcpus
                and disk is returned. Otherwise a flavor with exactly same ram, vcpus and disk is returned or a
                vimconnNotFoundException is raised
        """
        exact_match = False if self.config.get('use_existing_flavors') else True
        try:
            self._reload_connection()
            # best candidate so far for the non-exact search, seeded with a huge sentinel
            flavor_candidate_id = None
            flavor_candidate_data = (10000, 10000, 10000)
            flavor_target = (flavor_dict["ram"], flavor_dict["vcpus"], flavor_dict["disk"])
            # numa=None
            extended = flavor_dict.get("extended", {})
            if extended:
                #TODO
                raise vimconn.vimconnNotFoundException("Flavor with EPA still not implemented")
                # if len(numas) > 1:
                #     raise vimconn.vimconnNotFoundException("Cannot find any flavor with more than one numa")
                # numa=numas[0]
                # numas = extended.get("numas")
            for flavor in self.nova.flavors.list():
                epa = flavor.get_keys()
                if epa:
                    # flavors with EPA extra_specs are skipped; matching them is not implemented
                    continue
                    # TODO
                flavor_data = (flavor.ram, flavor.vcpus, flavor.disk)
                if flavor_data == flavor_target:
                    return flavor.id
                elif not exact_match and flavor_target < flavor_data < flavor_candidate_data:
                    # NOTE(review): tuple comparison is lexicographic, not per-component;
                    # a flavor with more ram but less disk can still pass this test - confirm intended
                    flavor_candidate_id = flavor.id
                    flavor_candidate_data = flavor_data
            if not exact_match and flavor_candidate_id:
                return flavor_candidate_id
            raise vimconn.vimconnNotFoundException("Cannot find any flavor matching '{}'".format(str(flavor_dict)))
        except (nvExceptions.NotFound, nvExceptions.ClientException, ksExceptions.ClientException, ConnectionError) as e:
            self._format_exception(e)
-
-    def process_resource_quota(self, quota, prefix, extra_specs):
-        """
-        :param prefix:
-        :param extra_specs: 
-        :return:
-        """
-        if 'limit' in quota:
-            extra_specs["quota:" + prefix + "_limit"] = quota['limit']
-        if 'reserve' in quota:
-            extra_specs["quota:" + prefix + "_reservation"] = quota['reserve']
-        if 'shares' in quota:
-            extra_specs["quota:" + prefix + "_shares_level"] = "custom"
-            extra_specs["quota:" + prefix + "_shares_share"] = quota['shares']
-
    def new_flavor(self, flavor_data, change_name_if_used=True):
        '''Adds a tenant flavor to openstack VIM
        if change_name_if_used is True, it will change name in case of conflict, because it is not supported name repetition
        Returns the flavor identifier
        '''
        self.logger.debug("Adding flavor '%s'", str(flavor_data))
        retry=0
        max_retries=3
        name_suffix = 0
        try:
            name=flavor_data['name']
            while retry<max_retries:
                retry+=1
                try:
                    self._reload_connection()
                    if change_name_if_used:
                        #get used names
                        fl_names=[]
                        fl=self.nova.flavors.list()
                        for f in fl:
                            fl_names.append(f.name)
                        # append "-<n>" suffixes until the name is free
                        while name in fl_names:
                            name_suffix += 1
                            name = flavor_data['name']+"-" + str(name_suffix)

                    ram = flavor_data.get('ram',64)
                    vcpus = flavor_data.get('vcpus',1)
                    extra_specs={}

                    extended = flavor_data.get("extended")
                    if extended:
                        numas=extended.get("numas")
                        if numas:
                            numa_nodes = len(numas)
                            if numa_nodes > 1:
                                # NOTE(review): returns a (code, message) tuple instead of raising like
                                # the other error paths; callers expecting a flavor id must check - confirm
                                return -1, "Can not add flavor with more than one numa"
                            extra_specs["hw:numa_nodes"] = str(numa_nodes)
                            extra_specs["hw:mem_page_size"] = "large"
                            extra_specs["hw:cpu_policy"] = "dedicated"
                            extra_specs["hw:numa_mempolicy"] = "strict"
                            if self.vim_type == "VIO":
                                extra_specs["vmware:extra_config"] = '{"numa.nodeAffinity":"0"}'
                                extra_specs["vmware:latency_sensitivity_level"] = "high"
                            for numa in numas:
                                #overwrite ram and vcpus
                                #check if key 'memory' is present in numa else use ram value at flavor
                                if 'memory' in numa:
                                    ram = numa['memory']*1024
                                #See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
                                extra_specs["hw:cpu_sockets"] = 1
                                if 'paired-threads' in numa:
                                    vcpus = numa['paired-threads']*2
                                    #cpu_thread_policy "require" implies that the compute node must have an STM architecture
                                    extra_specs["hw:cpu_thread_policy"] = "require"
                                    extra_specs["hw:cpu_policy"] = "dedicated"
                                elif 'cores' in numa:
                                    vcpus = numa['cores']
                                    # cpu_thread_policy "prefer" implies that the host must not have an SMT architecture, or a non-SMT architecture will be emulated
                                    extra_specs["hw:cpu_thread_policy"] = "isolate"
                                    extra_specs["hw:cpu_policy"] = "dedicated"
                                elif 'threads' in numa:
                                    vcpus = numa['threads']
                                    # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
                                    extra_specs["hw:cpu_thread_policy"] = "prefer"
                                    extra_specs["hw:cpu_policy"] = "dedicated"
                                # for interface in numa.get("interfaces",() ):
                                #     if interface["dedicated"]=="yes":
                                #         raise vimconn.vimconnException("Passthrough interfaces are not supported for the openstack connector", http_code=vimconn.HTTP_Service_Unavailable)
                                #     #TODO, add the key 'pci_passthrough:alias"="<label at config>:<number ifaces>"' when a way to connect it is available
                        # NOTE(review): 'elif' ties cpu-quota to the absence of numas, so a flavor with
                        # both numas and cpu-quota silently ignores cpu-quota - confirm intended
                        elif extended.get("cpu-quota"):
                            self.process_resource_quota(extended.get("cpu-quota"), "cpu", extra_specs)
                        if extended.get("mem-quota"):
                            self.process_resource_quota(extended.get("mem-quota"), "memory", extra_specs)
                        if extended.get("vif-quota"):
                            self.process_resource_quota(extended.get("vif-quota"), "vif", extra_specs)
                        if extended.get("disk-io-quota"):
                            self.process_resource_quota(extended.get("disk-io-quota"), "disk_io", extra_specs)
                    #create flavor
                    new_flavor=self.nova.flavors.create(name,
                                    ram,
                                    vcpus,
                                    flavor_data.get('disk',0),
                                    is_public=flavor_data.get('is_public', True)
                                )
                    #add metadata
                    if extra_specs:
                        new_flavor.set_keys(extra_specs)
                    return new_flavor.id
                except nvExceptions.Conflict as e:
                    # name collision: retry with a new suffixed name (when allowed)
                    if change_name_if_used and retry < max_retries:
                        continue
                    self._format_exception(e)
        #except nvExceptions.BadRequest as e:
        except (ksExceptions.ClientException, nvExceptions.ClientException, ConnectionError, KeyError) as e:
            self._format_exception(e)
-
-    def delete_flavor(self,flavor_id):
-        '''Deletes a tenant flavor from openstack VIM. Returns the old flavor_id
-        '''
-        try:
-            self._reload_connection()
-            self.nova.flavors.delete(flavor_id)
-            return flavor_id
-        #except nvExceptions.BadRequest as e:
-        except (nvExceptions.NotFound, ksExceptions.ClientException, nvExceptions.ClientException, ConnectionError) as e:
-            self._format_exception(e)
-
-    def new_image(self,image_dict):
-        '''
-        Adds a tenant image to VIM. imge_dict is a dictionary with:
-            name: name
-            disk_format: qcow2, vhd, vmdk, raw (by default), ...
-            location: path or URI
-            public: "yes" or "no"
-            metadata: metadata of the image
-        Returns the image_id
-        '''
-        retry=0
-        max_retries=3
-        while retry<max_retries:
-            retry+=1
-            try:
-                self._reload_connection()
-                #determine format  http://docs.openstack.org/developer/glance/formats.html
-                if "disk_format" in image_dict:
-                    disk_format=image_dict["disk_format"]
-                else: #autodiscover based on extension
-                    if image_dict['location'].endswith(".qcow2"):
-                        disk_format="qcow2"
-                    elif image_dict['location'].endswith(".vhd"):
-                        disk_format="vhd"
-                    elif image_dict['location'].endswith(".vmdk"):
-                        disk_format="vmdk"
-                    elif image_dict['location'].endswith(".vdi"):
-                        disk_format="vdi"
-                    elif image_dict['location'].endswith(".iso"):
-                        disk_format="iso"
-                    elif image_dict['location'].endswith(".aki"):
-                        disk_format="aki"
-                    elif image_dict['location'].endswith(".ari"):
-                        disk_format="ari"
-                    elif image_dict['location'].endswith(".ami"):
-                        disk_format="ami"
-                    else:
-                        disk_format="raw"
-                self.logger.debug("new_image: '%s' loading from '%s'", image_dict['name'], image_dict['location'])
-                if self.vim_type == "VIO":
-                    container_format = "bare"
-                    if 'container_format' in image_dict:
-                        container_format = image_dict['container_format']
-                    new_image = self.glance.images.create(name=image_dict['name'], container_format=container_format,
-                                                          disk_format=disk_format)
-                else:
-                    new_image = self.glance.images.create(name=image_dict['name'])
-                if image_dict['location'].startswith("http"):
-                    # TODO there is not a method to direct download. It must be downloaded locally with requests
-                    raise vimconn.vimconnNotImplemented("Cannot create image from URL")
-                else: #local path
-                    with open(image_dict['location']) as fimage:
-                        self.glance.images.upload(new_image.id, fimage)
-                        #new_image = self.glancev1.images.create(name=image_dict['name'], is_public=image_dict.get('public',"yes")=="yes",
-                        #    container_format="bare", data=fimage, disk_format=disk_format)
-                metadata_to_load = image_dict.get('metadata')
-                # TODO location is a reserved word for current openstack versions. fixed for VIO please check for openstack
-                if self.vim_type == "VIO":
-                    metadata_to_load['upload_location'] = image_dict['location']
-                else:
-                    metadata_to_load['location'] = image_dict['location']
-                self.glance.images.update(new_image.id, **metadata_to_load)
-                return new_image.id
-            except (nvExceptions.Conflict, ksExceptions.ClientException, nvExceptions.ClientException) as e:
-                self._format_exception(e)
-            except (HTTPException, gl1Exceptions.HTTPException, gl1Exceptions.CommunicationError, ConnectionError) as e:
-                if retry==max_retries:
-                    continue
-                self._format_exception(e)
-            except IOError as e:  #can not open the file
-                raise vimconn.vimconnConnectionException(type(e).__name__ + ": " + str(e)+ " for " + image_dict['location'],
-                                                         http_code=vimconn.HTTP_Bad_Request)
-
-    def delete_image(self, image_id):
-        '''Deletes a tenant image from openstack VIM. Returns the old id
-        '''
-        try:
-            self._reload_connection()
-            self.glance.images.delete(image_id)
-            return image_id
-        except (nvExceptions.NotFound, ksExceptions.ClientException, nvExceptions.ClientException, gl1Exceptions.CommunicationError, gl1Exceptions.HTTPNotFound, ConnectionError) as e: #TODO remove
-            self._format_exception(e)
-
-    def get_image_id_from_path(self, path):
-        '''Get the image id from image path in the VIM database. Returns the image_id'''
-        try:
-            self._reload_connection()
-            images = self.glance.images.list()
-            for image in images:
-                if image.metadata.get("location")==path:
-                    return image.id
-            raise vimconn.vimconnNotFoundException("image with location '{}' not found".format( path))
-        except (ksExceptions.ClientException, nvExceptions.ClientException, gl1Exceptions.CommunicationError, ConnectionError) as e:
-            self._format_exception(e)
-
-    def get_image_list(self, filter_dict={}):
-        '''Obtain tenant images from VIM
-        Filter_dict can be:
-            id: image id
-            name: image name
-            checksum: image checksum
-        Returns the image list of dictionaries:
-            [{<the fields at Filter_dict plus some VIM specific>}, ...]
-            List can be empty
-        '''
-        self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))
-        try:
-            self._reload_connection()
-            filter_dict_os = filter_dict.copy()
-            #First we filter by the available filter fields: name, id. The others are removed.
-            image_list = self.glance.images.list()
-            filtered_list = []
-            for image in image_list:
-                try:
-                    if filter_dict.get("name") and image["name"] != filter_dict["name"]:
-                        continue
-                    if filter_dict.get("id") and image["id"] != filter_dict["id"]:
-                        continue
-                    if filter_dict.get("checksum") and image["checksum"] != filter_dict["checksum"]:
-                        continue
-
-                    filtered_list.append(image.copy())
-                except gl1Exceptions.HTTPNotFound:
-                    pass
-            return filtered_list
-        except (ksExceptions.ClientException, nvExceptions.ClientException, gl1Exceptions.CommunicationError, ConnectionError) as e:
-            self._format_exception(e)
-
-    def __wait_for_vm(self, vm_id, status):
-        """wait until vm is in the desired status and return True.
-        If the VM gets in ERROR status, return false.
-        If the timeout is reached generate an exception"""
-        elapsed_time = 0
-        while elapsed_time < server_timeout:
-            vm_status = self.nova.servers.get(vm_id).status
-            if vm_status == status:
-                return True
-            if vm_status == 'ERROR':
-                return False
-            time.sleep(5)
-            elapsed_time += 5
-
-        # if we exceeded the timeout rollback
-        if elapsed_time >= server_timeout:
-            raise vimconn.vimconnException('Timeout waiting for instance ' + vm_id + ' to get ' + status,
-                                           http_code=vimconn.HTTP_Request_Timeout)
-
-    def _get_openstack_availablity_zones(self):
-        """
-        Get from openstack availability zones available
-        :return:
-        """
-        try:
-            openstack_availability_zone = self.nova.availability_zones.list()
-            openstack_availability_zone = [str(zone.zoneName) for zone in openstack_availability_zone
-                                           if zone.zoneName != 'internal']
-            return openstack_availability_zone
-        except Exception as e:
-            return None
-
-    def _set_availablity_zones(self):
-        """
-        Set vim availablity zone
-        :return:
-        """
-
-        if 'availability_zone' in self.config:
-            vim_availability_zones = self.config.get('availability_zone')
-            if isinstance(vim_availability_zones, str):
-                self.availability_zone = [vim_availability_zones]
-            elif isinstance(vim_availability_zones, list):
-                self.availability_zone = vim_availability_zones
-        else:
-            self.availability_zone = self._get_openstack_availablity_zones()
-
-    def _get_vm_availability_zone(self, availability_zone_index, availability_zone_list):
-        """
-        Return thge availability zone to be used by the created VM.
-        :return: The VIM availability zone to be used or None
-        """
-        if availability_zone_index is None:
-            if not self.config.get('availability_zone'):
-                return None
-            elif isinstance(self.config.get('availability_zone'), str):
-                return self.config['availability_zone']
-            else:
-                # TODO consider using a different parameter at config for default AV and AV list match
-                return self.config['availability_zone'][0]
-
-        vim_availability_zones = self.availability_zone
-        # check if VIM offer enough availability zones describe in the VNFD
-        if vim_availability_zones and len(availability_zone_list) <= len(vim_availability_zones):
-            # check if all the names of NFV AV match VIM AV names
-            match_by_index = False
-            for av in availability_zone_list:
-                if av not in vim_availability_zones:
-                    match_by_index = True
-                    break
-            if match_by_index:
-                return vim_availability_zones[availability_zone_index]
-            else:
-                return availability_zone_list[availability_zone_index]
-        else:
-            raise vimconn.vimconnConflictException("No enough availability zones at VIM for this deployment")
-
    def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None, disk_list=None,
                       availability_zone_index=None, availability_zone_list=None):
        """Adds a VM instance to VIM
        Params:
            start: indicates if VM must start or boot in pause mode. Ignored
            image_id,flavor_id: iamge and flavor uuid
            net_list: list of interfaces, each one is a dictionary with:
                name:
                net_id: network uuid to connect
                vpci: virtual vcpi to assign, ignored because openstack lack #TODO
                model: interface model, ignored #TODO
                mac_address: used for  SR-IOV ifaces #TODO for other types
                use: 'data', 'bridge',  'mgmt'
                type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
                vim_id: filled/added by this function
                floating_ip: True/False (or it can be None)
            'cloud_config': (optional) dictionary with:
            'key-pairs': (optional) list of strings with the public key to be inserted to the default user
            'users': (optional) list of users to be inserted, each item is a dict with:
                'name': (mandatory) user name,
                'key-pairs': (optional) list of strings with the public key to be inserted to the user
            'user-data': (optional) string is a text script to be passed directly to cloud-init
            'config-files': (optional). List of files to be transferred. Each item is a dict with:
                'dest': (mandatory) string with the destination absolute path
                'encoding': (optional, by default text). Can be one of:
                    'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
                'content' (mandatory): string with the content of the file
                'permissions': (optional) string with file permissions, typically octal notation '0644'
                'owner': (optional) file owner, string with the format 'owner:group'
            'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
            'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
                'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
                'size': (mandatory) string with the size of the disk in GB
                'vim_id' (optional) should use this existing volume id
            availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
            availability_zone_list: list of availability zones given by user in the VNFD descriptor.  Ignore if
                availability_zone_index is None
                #TODO ip, security groups
        Returns a tuple with the instance identifier and created_items or raises an exception on error
            created_items can be None or a dictionary where this method can include key-values that will be passed to
            the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
            as not present.
        """
        self.logger.debug("new_vminstance input: image='%s' flavor='%s' nics='%s'",image_id, flavor_id,str(net_list))
        try:
            server = None
            created_items = {}
            # metadata = {}
            net_list_vim = []
            external_network = []   # list of external networks to be connected to instance, later on used to create floating_ip
            no_secured_ports = []   # List of port-is with port-security disabled
            self._reload_connection()
            # metadata_vpci = {}   # For a specific neutron plugin
            block_device_mapping = None

            # --- 1. create a neutron port for each connected interface in net_list ---
            # every created port is recorded in created_items so delete_vminstance can roll it back
            for net in net_list:
                if not net.get("net_id"):   # skip non connected iface
                    continue

                port_dict = {
                    "network_id": net["net_id"],
                    "name": net.get("name"),
                    "admin_state_up": True
                }
                if self.config.get("security_groups") and net.get("port_security") is not False and \
                        not self.config.get("no_port_security_extension"):
                    if not self.security_groups_id:
                        self._get_ids_from_name()
                    port_dict["security_groups"] = self.security_groups_id

                if net["type"]=="virtual":
                    pass
                    # if "vpci" in net:
                    #     metadata_vpci[ net["net_id"] ] = [[ net["vpci"], "" ]]
                elif net["type"] == "VF" or net["type"] == "SR-IOV":  # for VF
                    # if "vpci" in net:
                    #     if "VF" not in metadata_vpci:
                    #         metadata_vpci["VF"]=[]
                    #     metadata_vpci["VF"].append([ net["vpci"], "" ])
                    port_dict["binding:vnic_type"]="direct"
                    # VIO specific Changes
                    if self.vim_type == "VIO":
                        # Need to create port with port_security_enabled = False and no-security-groups
                        port_dict["port_security_enabled"]=False
                        port_dict["provider_security_groups"]=[]
                        port_dict["security_groups"]=[]
                else:   # For PT PCI-PASSTHROUGH
                    # VIO specific Changes
                    # Current VIO release does not support port with type 'direct-physical'
                    # So no need to create virtual port in case of PCI-device.
                    # Will update port_dict code when support gets added in next VIO release
                    if self.vim_type == "VIO":
                        raise vimconn.vimconnNotSupportedException(
                            "Current VIO release does not support full passthrough (PT)")
                    # if "vpci" in net:
                    #     if "PF" not in metadata_vpci:
                    #         metadata_vpci["PF"]=[]
                    #     metadata_vpci["PF"].append([ net["vpci"], "" ])
                    port_dict["binding:vnic_type"]="direct-physical"
                if not port_dict["name"]:
                    port_dict["name"]=name
                if net.get("mac_address"):
                    port_dict["mac_address"]=net["mac_address"]
                if net.get("ip_address"):
                    port_dict["fixed_ips"] = [{'ip_address': net["ip_address"]}]
                    # TODO add 'subnet_id': <subnet_id>
                new_port = self.neutron.create_port({"port": port_dict })
                created_items["port:" + str(new_port["port"]["id"])] = True
                # NOTE(review): the key "mac_adress" is misspelled; consumers may rely on it -- confirm before renaming
                net["mac_adress"] = new_port["port"]["mac_address"]
                net["vim_id"] = new_port["port"]["id"]
                # if try to use a network without subnetwork, it will return a emtpy list
                fixed_ips = new_port["port"].get("fixed_ips")
                if fixed_ips:
                    net["ip"] = fixed_ips[0].get("ip_address")
                else:
                    net["ip"] = None

                port = {"port-id": new_port["port"]["id"]}
                # interface tags are only accepted from nova API microversion 2.32 onwards
                if float(self.nova.api_version.get_string()) >= 2.32:
                    port["tag"] = new_port["port"]["name"]
                net_list_vim.append(port)

                if net.get('floating_ip', False):
                    net['exit_on_floating_ip_error'] = True
                    external_network.append(net)
                elif net['use'] == 'mgmt' and self.config.get('use_floating_ip'):
                    net['exit_on_floating_ip_error'] = False
                    external_network.append(net)
                    net['floating_ip'] = self.config.get('use_floating_ip')

                # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic is dropped.
                # As a workaround we wait until the VM is active and then disable the port-security
                # "== False" on purpose: only an explicit False disables security; None/absent does not
                if net.get("port_security") == False and not self.config.get("no_port_security_extension"):
                    no_secured_ports.append(new_port["port"]["id"])

            # if metadata_vpci:
            #     metadata = {"pci_assignement": json.dumps(metadata_vpci)}
            #     if len(metadata["pci_assignement"]) >255:
            #         #limit the metadata size
            #         #metadata["pci_assignement"] = metadata["pci_assignement"][0:255]
            #         self.logger.warn("Metadata deleted since it exceeds the expected length (255) ")
            #         metadata = {}

            self.logger.debug("name '%s' image_id '%s'flavor_id '%s' net_list_vim '%s' description '%s'",
                              name, image_id, flavor_id, str(net_list_vim), description)

            # cloud config
            config_drive, userdata = self._create_user_data(cloud_config)

            # Create additional volumes in case these are present in disk_list
            # volumes are mapped as _vdb, _vdc, ... (presumably vda is the boot disk -- TODO confirm)
            base_disk_index = ord('b')
            if disk_list:
                block_device_mapping = {}
                for disk in disk_list:
                    if disk.get('vim_id'):
                        block_device_mapping['_vd' + chr(base_disk_index)] = disk['vim_id']
                    else:
                        if 'image_id' in disk:
                            volume = self.cinder.volumes.create(size=disk['size'], name=name + '_vd' +
                                                                chr(base_disk_index), imageRef=disk['image_id'])
                        else:
                            volume = self.cinder.volumes.create(size=disk['size'], name=name + '_vd' +
                                                                chr(base_disk_index))
                        created_items["volume:" + str(volume.id)] = True
                        block_device_mapping['_vd' + chr(base_disk_index)] = volume.id
                    base_disk_index += 1

                # Wait until created volumes are with status available
                elapsed_time = 0
                while elapsed_time < volume_timeout:
                    for created_item in created_items:
                        v, _, volume_id = created_item.partition(":")
                        if v == 'volume':
                            if self.cinder.volumes.get(volume_id).status != 'available':
                                break
                    else:  # all ready: break from while
                        break
                    time.sleep(5)
                    elapsed_time += 5
                # If we exceeded the timeout rollback
                if elapsed_time >= volume_timeout:
                    raise vimconn.vimconnException('Timeout creating volumes for instance ' + name,
                                                   http_code=vimconn.HTTP_Request_Timeout)
            # get availability Zone
            vm_av_zone = self._get_vm_availability_zone(availability_zone_index, availability_zone_list)

            self.logger.debug("nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
                              "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
                              "block_device_mapping={})".format(name, image_id, flavor_id, net_list_vim,
                                                                self.config.get("security_groups"), vm_av_zone,
                                                                self.config.get('keypair'), userdata, config_drive,
                                                                block_device_mapping))
            server = self.nova.servers.create(name, image_id, flavor_id, nics=net_list_vim,
                                              security_groups=self.config.get("security_groups"),
                                              # TODO remove security_groups in future versions. Already at neutron port
                                              availability_zone=vm_av_zone,
                                              key_name=self.config.get('keypair'),
                                              userdata=userdata,
                                              config_drive=config_drive,
                                              block_device_mapping=block_device_mapping
                                              )  # , description=description)

            vm_start_time = time.time()
            # Previously mentioned workaround to wait until the VM is active and then disable the port-security
            if no_secured_ports:
                self.__wait_for_vm(server.id, 'ACTIVE')

            for port_id in no_secured_ports:
                try:
                    self.neutron.update_port(port_id,
                                             {"port": {"port_security_enabled": False, "security_groups": None}})
                except Exception as e:
                    raise vimconn.vimconnException("It was not possible to disable port security for port {}".format(
                        port_id))
            # print "DONE :-)", server

            # --- 2. attach floating IPs: reuse a free one when available, otherwise create a new one ---
            # pool_id = None
            if external_network:
                floating_ips = self.neutron.list_floatingips().get("floatingips", ())
            for floating_network in external_network:
                try:
                    assigned = False
                    while not assigned:
                        if floating_ips:
                            ip = floating_ips.pop(0)
                            if ip.get("port_id", False) or ip.get('tenant_id') != server.tenant_id:
                                continue
                            if isinstance(floating_network['floating_ip'], str):
                                if ip.get("floating_network_id") != floating_network['floating_ip']:
                                    continue
                            free_floating_ip = ip.get("floating_ip_address")
                        else:
                            if isinstance(floating_network['floating_ip'], str) and \
                                floating_network['floating_ip'].lower() != "true":
                                pool_id = floating_network['floating_ip']
                            else:
                                # Find the external network
                                external_nets = list()
                                for net in self.neutron.list_networks()['networks']:
                                    if net['router:external']:
                                            external_nets.append(net)

                                if len(external_nets) == 0:
                                    raise vimconn.vimconnException("Cannot create floating_ip automatically since no external "
                                                                   "network is present",
                                                                    http_code=vimconn.HTTP_Conflict)
                                if len(external_nets) > 1:
                                    raise vimconn.vimconnException("Cannot create floating_ip automatically since multiple "
                                                                   "external networks are present",
                                                                   http_code=vimconn.HTTP_Conflict)

                                pool_id = external_nets[0].get('id')
                            param = {'floatingip': {'floating_network_id': pool_id, 'tenant_id': server.tenant_id}}
                            try:
                                # self.logger.debug("Creating floating IP")
                                new_floating_ip = self.neutron.create_floatingip(param)
                                free_floating_ip = new_floating_ip['floatingip']['floating_ip_address']
                            except Exception as e:
                                raise vimconn.vimconnException(type(e).__name__ + ": Cannot create new floating_ip " +
                                                               str(e), http_code=vimconn.HTTP_Conflict)

                        fix_ip = floating_network.get('ip')
                        while not assigned:
                            try:
                                server.add_floating_ip(free_floating_ip, fix_ip)
                                assigned = True
                            except Exception as e:
                                # openstack need some time after VM creation to asign an IP. So retry if fails
                                vm_status = self.nova.servers.get(server.id).status
                                if vm_status != 'ACTIVE' and vm_status != 'ERROR':
                                    if time.time() - vm_start_time < server_timeout:
                                        time.sleep(5)
                                        continue
                                raise vimconn.vimconnException(
                                    "Cannot create floating_ip: {} {}".format(type(e).__name__, e),
                                    http_code=vimconn.HTTP_Conflict)

                except Exception as e:
                    if not floating_network['exit_on_floating_ip_error']:
                        self.logger.warn("Cannot create floating_ip. %s", str(e))
                        continue
                    raise
            return server.id, created_items
#        except nvExceptions.NotFound as e:
#            error_value=-vimconn.HTTP_Not_Found
#            error_text= "vm instance %s not found" % vm_id
#        except TypeError as e:
#            raise vimconn.vimconnException(type(e).__name__ + ": "+  str(e), http_code=vimconn.HTTP_Bad_Request)

        # on any failure: best-effort rollback of everything in created_items, then re-raise formatted
        except Exception as e:
            server_id = None
            if server:
                server_id = server.id
            try:
                self.delete_vminstance(server_id, created_items)
            except Exception as e2:
                self.logger.error("new_vminstance rollback fail {}".format(e2))

            self._format_exception(e)
-
-    def get_vminstance(self,vm_id):
-        '''Returns the VM instance information from VIM'''
-        #self.logger.debug("Getting VM from VIM")
-        try:
-            self._reload_connection()
-            server = self.nova.servers.find(id=vm_id)
-            #TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
-            return server.to_dict()
-        except (ksExceptions.ClientException, nvExceptions.ClientException, nvExceptions.NotFound, ConnectionError) as e:
-            self._format_exception(e)
-
-    def get_vminstance_console(self,vm_id, console_type="vnc"):
-        '''
-        Get a console for the virtual machine
-        Params:
-            vm_id: uuid of the VM
-            console_type, can be:
-                "novnc" (by default), "xvpvnc" for VNC types,
-                "rdp-html5" for RDP types, "spice-html5" for SPICE types
-        Returns dict with the console parameters:
-                protocol: ssh, ftp, http, https, ...
-                server:   usually ip address
-                port:     the http, ssh, ... port
-                suffix:   extra text, e.g. the http path and query string
-        '''
-        self.logger.debug("Getting VM CONSOLE from VIM")
-        try:
-            self._reload_connection()
-            server = self.nova.servers.find(id=vm_id)
-            if console_type == None or console_type == "novnc":
-                console_dict = server.get_vnc_console("novnc")
-            elif console_type == "xvpvnc":
-                console_dict = server.get_vnc_console(console_type)
-            elif console_type == "rdp-html5":
-                console_dict = server.get_rdp_console(console_type)
-            elif console_type == "spice-html5":
-                console_dict = server.get_spice_console(console_type)
-            else:
-                raise vimconn.vimconnException("console type '{}' not allowed".format(console_type), http_code=vimconn.HTTP_Bad_Request)
-
-            console_dict1 = console_dict.get("console")
-            if console_dict1:
-                console_url = console_dict1.get("url")
-                if console_url:
-                    #parse console_url
-                    protocol_index = console_url.find("//")
-                    suffix_index = console_url[protocol_index+2:].find("/") + protocol_index+2
-                    port_index = console_url[protocol_index+2:suffix_index].find(":") + protocol_index+2
-                    if protocol_index < 0 or port_index<0 or suffix_index<0:
-                        return -vimconn.HTTP_Internal_Server_Error, "Unexpected response from VIM"
-                    console_dict={"protocol": console_url[0:protocol_index],
-                                  "server":   console_url[protocol_index+2:port_index],
-                                  "port":     console_url[port_index:suffix_index],
-                                  "suffix":   console_url[suffix_index+1:]
-                                  }
-                    protocol_index += 2
-                    return console_dict
-            raise vimconn.vimconnUnexpectedResponse("Unexpected response from VIM")
-
-        except (nvExceptions.NotFound, ksExceptions.ClientException, nvExceptions.ClientException, nvExceptions.BadRequest, ConnectionError) as e:
-            self._format_exception(e)
-
-    def delete_vminstance(self, vm_id, created_items=None):
-        '''Removes a VM instance from VIM. Returns the old identifier
-        '''
-        #print "osconnector: Getting VM from VIM"
-        if created_items == None:
-            created_items = {}
-        try:
-            self._reload_connection()
-            # delete VM ports attached to this networks before the virtual machine
-            for k, v in created_items.items():
-                if not v:  # skip already deleted
-                    continue
-                try:
-                    k_item, _, k_id = k.partition(":")
-                    if k_item == "port":
-                        self.neutron.delete_port(k_id)
-                except Exception as e:
-                    self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
-
-            # #commented because detaching the volumes makes the servers.delete not work properly ?!?
-            # #dettach volumes attached
-            # server = self.nova.servers.get(vm_id)
-            # volumes_attached_dict = server._info['os-extended-volumes:volumes_attached']   #volume['id']
-            # #for volume in volumes_attached_dict:
-            # #    self.cinder.volumes.detach(volume['id'])
-
-            if vm_id:
-                self.nova.servers.delete(vm_id)
-
-            # delete volumes. Although having detached, they should have in active status before deleting
-            # we ensure in this loop
-            keep_waiting = True
-            elapsed_time = 0
-            while keep_waiting and elapsed_time < volume_timeout:
-                keep_waiting = False
-                for k, v in created_items.items():
-                    if not v:  # skip already deleted
-                        continue
-                    try:
-                        k_item, _, k_id = k.partition(":")
-                        if k_item == "volume":
-                            if self.cinder.volumes.get(k_id).status != 'available':
-                                keep_waiting = True
-                            else:
-                                self.cinder.volumes.delete(k_id)
-                    except Exception as e:
-                        self.logger.error("Error deleting volume: {}: {}".format(type(e).__name__, e))
-                if keep_waiting:
-                    time.sleep(1)
-                    elapsed_time += 1
-            return None
-        except (nvExceptions.NotFound, ksExceptions.ClientException, nvExceptions.ClientException, ConnectionError) as e:
-            self._format_exception(e)
-
    def refresh_vms_status(self, vm_list):
        '''Get the status of the virtual machines and their interfaces/ports
           Params: the list of VM identifiers
           Returns a dictionary with:
                vm_id:          #VIM id of this Virtual Machine
                    status:     #Mandatory. Text with one of:
                                #  DELETED (not found at vim)
                                #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                                #  OTHER (Vim reported other status not understood)
                                #  ERROR (VIM indicates an ERROR status)
                                #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
                                #  CREATING (on building process), ERROR
                                #  ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
                                #
                    error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
                    vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
                    interfaces:
                     -  vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
                        mac_address:      #Text format XX:XX:XX:XX:XX:XX
                        vim_net_id:       #network id where this interface is connected
                        vim_interface_id: #interface/port VIM id
                        ip_address:       #null, or text with IPv4, IPv6 address
                        compute_node:     #identification of compute node where PF,VF interface is allocated
                        pci:              #PCI address of the NIC that hosts the PF,VF
                        vlan:             #physical VLAN used for VF
        '''
        vm_dict={}
        self.logger.debug("refresh_vms status: Getting tenant VM instance information from VIM")
        # Each VM is processed independently: a failure on one VM is recorded in its own
        # entry (status DELETED / VIM_ERROR) and the loop continues with the rest.
        for vm_id in vm_list:
            vm={}
            try:
                vm_vim = self.get_vminstance(vm_id)
                # translate the nova status into the MANO status vocabulary documented above
                if vm_vim['status'] in vmStatus2manoFormat:
                    vm['status']    =  vmStatus2manoFormat[ vm_vim['status'] ]
                else:
                    vm['status']    = "OTHER"
                    vm['error_msg'] = "VIM status reported " + vm_vim['status']

                vm['vim_info'] = self.serialize(vm_vim)

                vm["interfaces"] = []
                if vm_vim.get('fault'):
                    vm['error_msg'] = str(vm_vim['fault'])
                #get interfaces
                # Interface retrieval is best-effort: any error here is logged and the VM entry
                # is kept with whatever interfaces were collected so far.
                try:
                    self._reload_connection()
                    port_dict = self.neutron.list_ports(device_id=vm_id)
                    for port in port_dict["ports"]:
                        interface={}
                        interface['vim_info'] = self.serialize(port)
                        interface["mac_address"] = port.get("mac_address")
                        interface["vim_net_id"] = port["network_id"]
                        interface["vim_interface_id"] = port["id"]
                        # check if OS-EXT-SRV-ATTR:host is there,
                        # in case of non-admin credentials, it will be missing
                        if vm_vim.get('OS-EXT-SRV-ATTR:host'):
                            interface["compute_node"] = vm_vim['OS-EXT-SRV-ATTR:host']
                        interface["pci"] = None

                        # check if binding:profile is there,
                        # in case of non-admin credentials, it will be missing
                        if port.get('binding:profile'):
                            if port['binding:profile'].get('pci_slot'):
                                # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting the slot to 0x00
                                # TODO: This is just a workaround valid for niantinc. Find a better way to do so
                                #   CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2)   assuming there are 2 ports per nic
                                pci = port['binding:profile']['pci_slot']
                                # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
                                interface["pci"] = pci
                        interface["vlan"] = None
                        #if network is of type vlan and port is of type direct (sr-iov) then set vlan id
                        network = self.neutron.show_network(port["network_id"])
                        if network['network'].get('provider:network_type') == 'vlan' and \
                            port.get("binding:vnic_type") == "direct":
                            interface["vlan"] = network['network'].get('provider:segmentation_id')
                        ips=[]
                        #look for floating ip address
                        # best-effort lookup: a missing/failed floating-ip query is silently ignored
                        try:
                            floating_ip_dict = self.neutron.list_floatingips(port_id=port["id"])
                            if floating_ip_dict.get("floatingips"):
                                ips.append(floating_ip_dict["floatingips"][0].get("floating_ip_address") )
                        except Exception:
                            pass

                        # fixed ips are appended after the floating ip, all joined with ';'
                        for subnet in port["fixed_ips"]:
                            ips.append(subnet["ip_address"])
                        interface["ip_address"] = ";".join(ips)
                        vm["interfaces"].append(interface)
                except Exception as e:
                    self.logger.error("Error getting vm interface information {}: {}".format(type(e).__name__, e),
                                      exc_info=True)
            except vimconn.vimconnNotFoundException as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm['status'] = "DELETED"
                vm['error_msg'] = str(e)
            except vimconn.vimconnException as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm['status'] = "VIM_ERROR"
                vm['error_msg'] = str(e)
            vm_dict[vm_id] = vm
        return vm_dict
-
-    def action_vminstance(self, vm_id, action_dict, created_items={}):
-        '''Send and action over a VM instance from VIM
-        Returns None or the console dict if the action was successfully sent to the VIM'''
-        self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
-        try:
-            self._reload_connection()
-            server = self.nova.servers.find(id=vm_id)
-            if "start" in action_dict:
-                if action_dict["start"]=="rebuild":
-                    server.rebuild()
-                else:
-                    if server.status=="PAUSED":
-                        server.unpause()
-                    elif server.status=="SUSPENDED":
-                        server.resume()
-                    elif server.status=="SHUTOFF":
-                        server.start()
-            elif "pause" in action_dict:
-                server.pause()
-            elif "resume" in action_dict:
-                server.resume()
-            elif "shutoff" in action_dict or "shutdown" in action_dict:
-                server.stop()
-            elif "forceOff" in action_dict:
-                server.stop() #TODO
-            elif "terminate" in action_dict:
-                server.delete()
-            elif "createImage" in action_dict:
-                server.create_image()
-                #"path":path_schema,
-                #"description":description_schema,
-                #"name":name_schema,
-                #"metadata":metadata_schema,
-                #"imageRef": id_schema,
-                #"disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
-            elif "rebuild" in action_dict:
-                server.rebuild(server.image['id'])
-            elif "reboot" in action_dict:
-                server.reboot() #reboot_type='SOFT'
-            elif "console" in action_dict:
-                console_type = action_dict["console"]
-                if console_type == None or console_type == "novnc":
-                    console_dict = server.get_vnc_console("novnc")
-                elif console_type == "xvpvnc":
-                    console_dict = server.get_vnc_console(console_type)
-                elif console_type == "rdp-html5":
-                    console_dict = server.get_rdp_console(console_type)
-                elif console_type == "spice-html5":
-                    console_dict = server.get_spice_console(console_type)
-                else:
-                    raise vimconn.vimconnException("console type '{}' not allowed".format(console_type),
-                                                   http_code=vimconn.HTTP_Bad_Request)
-                try:
-                    console_url = console_dict["console"]["url"]
-                    #parse console_url
-                    protocol_index = console_url.find("//")
-                    suffix_index = console_url[protocol_index+2:].find("/") + protocol_index+2
-                    port_index = console_url[protocol_index+2:suffix_index].find(":") + protocol_index+2
-                    if protocol_index < 0 or port_index<0 or suffix_index<0:
-                        raise vimconn.vimconnException("Unexpected response from VIM " + str(console_dict))
-                    console_dict2={"protocol": console_url[0:protocol_index],
-                                  "server":   console_url[protocol_index+2 : port_index],
-                                  "port":     int(console_url[port_index+1 : suffix_index]),
-                                  "suffix":   console_url[suffix_index+1:]
-                                  }
-                    return console_dict2
-                except Exception as e:
-                    raise vimconn.vimconnException("Unexpected response from VIM " + str(console_dict))
-
-            return None
-        except (ksExceptions.ClientException, nvExceptions.ClientException, nvExceptions.NotFound, ConnectionError) as e:
-            self._format_exception(e)
-        #TODO insert exception vimconn.HTTP_Unauthorized
-
-    ####### VIO Specific Changes #########
-    def _generate_vlanID(self):
-        """
-         Method to get unused vlanID
-            Args:
-                None
-            Returns:
-                vlanID
-        """
-        #Get used VLAN IDs
-        usedVlanIDs = []
-        networks = self.get_network_list()
-        for net in networks:
-            if net.get('provider:segmentation_id'):
-                usedVlanIDs.append(net.get('provider:segmentation_id'))
-        used_vlanIDs = set(usedVlanIDs)
-
-        #find unused VLAN ID
-        for vlanID_range in self.config.get('dataplane_net_vlan_range'):
-            try:
-                start_vlanid , end_vlanid = map(int, vlanID_range.replace(" ", "").split("-"))
-                for vlanID in xrange(start_vlanid, end_vlanid + 1):
-                    if vlanID not in used_vlanIDs:
-                        return vlanID
-            except Exception as exp:
-                raise vimconn.vimconnException("Exception {} occurred while generating VLAN ID.".format(exp))
-        else:
-            raise vimconn.vimconnConflictException("Unable to create the SRIOV VLAN network."\
-                " All given Vlan IDs {} are in use.".format(self.config.get('dataplane_net_vlan_range')))
-
-
-    def _generate_multisegment_vlanID(self):
-        """
-         Method to get unused vlanID
-            Args:
-                None
-            Returns:
-                vlanID
-        """
-        #Get used VLAN IDs
-        usedVlanIDs = []
-        networks = self.get_network_list()
-        for net in networks:
-            if net.get('provider:network_type') == "vlan" and net.get('provider:segmentation_id'):
-                usedVlanIDs.append(net.get('provider:segmentation_id'))
-            elif net.get('segments'):
-                for segment in net.get('segments'):
-                    if segment.get('provider:network_type') == "vlan" and segment.get('provider:segmentation_id'):
-                        usedVlanIDs.append(segment.get('provider:segmentation_id'))
-        used_vlanIDs = set(usedVlanIDs)
-
-        #find unused VLAN ID
-        for vlanID_range in self.config.get('multisegment_vlan_range'):
-            try:
-                start_vlanid , end_vlanid = map(int, vlanID_range.replace(" ", "").split("-"))
-                for vlanID in xrange(start_vlanid, end_vlanid + 1):
-                    if vlanID not in used_vlanIDs:
-                        return vlanID
-            except Exception as exp:
-                raise vimconn.vimconnException("Exception {} occurred while generating VLAN ID.".format(exp))
-        else:
-            raise vimconn.vimconnConflictException("Unable to create the VLAN segment."\
-                " All VLAN IDs {} are in use.".format(self.config.get('multisegment_vlan_range')))
-
-
-    def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
-        """
-        Method to validate user given vlanID ranges
-            Args:  None
-            Returns: None
-        """
-        for vlanID_range in input_vlan_range:
-            vlan_range = vlanID_range.replace(" ", "")
-            #validate format
-            vlanID_pattern = r'(\d)*-(\d)*$'
-            match_obj = re.match(vlanID_pattern, vlan_range)
-            if not match_obj:
-                raise vimconn.vimconnConflictException("Invalid VLAN range for {}: {}.You must provide "\
-                "'{}' in format [start_ID - end_ID].".format(text_vlan_range, vlanID_range, text_vlan_range))
-
-            start_vlanid , end_vlanid = map(int,vlan_range.split("-"))
-            if start_vlanid <= 0 :
-                raise vimconn.vimconnConflictException("Invalid VLAN range for {}: {}."\
-                "Start ID can not be zero. For VLAN "\
-                "networks valid IDs are 1 to 4094 ".format(text_vlan_range, vlanID_range))
-            if end_vlanid > 4094 :
-                raise vimconn.vimconnConflictException("Invalid VLAN range for {}: {}."\
-                "End VLAN ID can not be greater than 4094. For VLAN "\
-                "networks valid IDs are 1 to 4094 ".format(text_vlan_range, vlanID_range))
-
-            if start_vlanid > end_vlanid:
-                raise vimconn.vimconnConflictException("Invalid VLAN range for {}: {}."\
-                    "You must provide '{}' in format start_ID - end_ID and "\
-                    "start_ID < end_ID ".format(text_vlan_range, vlanID_range, text_vlan_range))
-
#NOT USED FUNCTIONS: the methods below are not currently called; kept for interface compatibility
-
-    def new_external_port(self, port_data):
-        #TODO openstack if needed
-        '''Adds a external port to VIM'''
-        '''Returns the port identifier'''
-        return -vimconn.HTTP_Internal_Server_Error, "osconnector.new_external_port() not implemented"
-
-    def connect_port_network(self, port_id, network_id, admin=False):
-        #TODO openstack if needed
-        '''Connects a external port to a network'''
-        '''Returns status code of the VIM response'''
-        return -vimconn.HTTP_Internal_Server_Error, "osconnector.connect_port_network() not implemented"
-
-    def new_user(self, user_name, user_passwd, tenant_id=None):
-        '''Adds a new user to openstack VIM'''
-        '''Returns the user identifier'''
-        self.logger.debug("osconnector: Adding a new user to VIM")
-        try:
-            self._reload_connection()
-            user=self.keystone.users.create(user_name, password=user_passwd, default_project=tenant_id)
-            #self.keystone.tenants.add_user(self.k_creds["username"], #role)
-            return user.id
-        except ksExceptions.ConnectionError as e:
-            error_value=-vimconn.HTTP_Bad_Request
-            error_text= type(e).__name__ + ": "+  (str(e) if len(e.args)==0 else str(e.args[0]))
-        except ksExceptions.ClientException as e: #TODO remove
-            error_value=-vimconn.HTTP_Bad_Request
-            error_text= type(e).__name__ + ": "+  (str(e) if len(e.args)==0 else str(e.args[0]))
-        #TODO insert exception vimconn.HTTP_Unauthorized
-        #if reaching here is because an exception
-        self.logger.debug("new_user " + error_text)
-        return error_value, error_text
-
-    def delete_user(self, user_id):
-        '''Delete a user from openstack VIM'''
-        '''Returns the user identifier'''
-        if self.debug:
-            print("osconnector: Deleting  a  user from VIM")
-        try:
-            self._reload_connection()
-            self.keystone.users.delete(user_id)
-            return 1, user_id
-        except ksExceptions.ConnectionError as e:
-            error_value=-vimconn.HTTP_Bad_Request
-            error_text= type(e).__name__ + ": "+  (str(e) if len(e.args)==0 else str(e.args[0]))
-        except ksExceptions.NotFound as e:
-            error_value=-vimconn.HTTP_Not_Found
-            error_text= type(e).__name__ + ": "+  (str(e) if len(e.args)==0 else str(e.args[0]))
-        except ksExceptions.ClientException as e: #TODO remove
-            error_value=-vimconn.HTTP_Bad_Request
-            error_text= type(e).__name__ + ": "+  (str(e) if len(e.args)==0 else str(e.args[0]))
-        #TODO insert exception vimconn.HTTP_Unauthorized
-        #if reaching here is because an exception
-            self.logger.debug("delete_tenant " + error_text)
-        return error_value, error_text
-
-    def get_hosts_info(self):
-        '''Get the information of deployed hosts
-        Returns the hosts content'''
-        if self.debug:
-            print("osconnector: Getting Host info from VIM")
-        try:
-            h_list=[]
-            self._reload_connection()
-            hypervisors = self.nova.hypervisors.list()
-            for hype in hypervisors:
-                h_list.append( hype.to_dict() )
-            return 1, {"hosts":h_list}
-        except nvExceptions.NotFound as e:
-            error_value=-vimconn.HTTP_Not_Found
-            error_text= (str(e) if len(e.args)==0 else str(e.args[0]))
-        except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
-            error_value=-vimconn.HTTP_Bad_Request
-            error_text= type(e).__name__ + ": "+  (str(e) if len(e.args)==0 else str(e.args[0]))
-        #TODO insert exception vimconn.HTTP_Unauthorized
-        #if reaching here is because an exception
-        self.logger.debug("get_hosts_info " + error_text)
-        return error_value, error_text
-
    def get_hosts(self, vim_tenant):
        '''Get the hosts and deployed instances
        Returns the hosts content'''
        # Reuse get_hosts_info() for the hypervisor list; on error it already returns the
        # (negative code, text) tuple of this module's legacy protocol, forwarded unchanged.
        r, hype_dict = self.get_hosts_info()
        if r<0:
            return r, hype_dict
        hypervisors = hype_dict["hosts"]
        try:
            # NOTE(review): no _reload_connection() here; relies on get_hosts_info() above
            # having refreshed the nova client — confirm if this method is ever re-enabled
            servers = self.nova.servers.list()
            # attach to each hypervisor dict the list of server ids it hosts, under key 'vm'
            for hype in hypervisors:
                for server in servers:
                    if server.to_dict()['OS-EXT-SRV-ATTR:hypervisor_hostname']==hype['hypervisor_hostname']:
                        if 'vm' in hype:
                            hype['vm'].append(server.id)
                        else:
                            hype['vm'] = [server.id]
            return 1, hype_dict
        except nvExceptions.NotFound as e:
            error_value=-vimconn.HTTP_Not_Found
            error_text= (str(e) if len(e.args)==0 else str(e.args[0]))
        except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
            error_value=-vimconn.HTTP_Bad_Request
            error_text= type(e).__name__ + ": "+  (str(e) if len(e.args)==0 else str(e.args[0]))
        #TODO insert exception vimconn.HTTP_Unauthorized
        #if reaching here is because an exception
        self.logger.debug("get_hosts " + error_text)
        return error_value, error_text
-
-    def new_classification(self, name, ctype, definition):
-        self.logger.debug(
-            'Adding a new (Traffic) Classification to VIM, named %s', name)
-        try:
-            new_class = None
-            self._reload_connection()
-            if ctype not in supportedClassificationTypes:
-                raise vimconn.vimconnNotSupportedException(
-                        'OpenStack VIM connector doesn\'t support provided '
-                        'Classification Type {}, supported ones are: '
-                        '{}'.format(ctype, supportedClassificationTypes))
-            if not self._validate_classification(ctype, definition):
-                raise vimconn.vimconnException(
-                    'Incorrect Classification definition '
-                    'for the type specified.')
-            classification_dict = definition
-            classification_dict['name'] = name
-
-            new_class = self.neutron.create_sfc_flow_classifier(
-                {'flow_classifier': classification_dict})
-            return new_class['flow_classifier']['id']
-        except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
-                neExceptions.NeutronException, ConnectionError) as e:
-            self.logger.error(
-                'Creation of Classification failed.')
-            self._format_exception(e)
-
-    def get_classification(self, class_id):
-        self.logger.debug(" Getting Classification %s from VIM", class_id)
-        filter_dict = {"id": class_id}
-        class_list = self.get_classification_list(filter_dict)
-        if len(class_list) == 0:
-            raise vimconn.vimconnNotFoundException(
-                "Classification '{}' not found".format(class_id))
-        elif len(class_list) > 1:
-            raise vimconn.vimconnConflictException(
-                "Found more than one Classification with this criteria")
-        classification = class_list[0]
-        return classification
-
-    def get_classification_list(self, filter_dict={}):
-        self.logger.debug("Getting Classifications from VIM filter: '%s'",
-                          str(filter_dict))
-        try:
-            filter_dict_os = filter_dict.copy()
-            self._reload_connection()
-            if self.api_version3 and "tenant_id" in filter_dict_os:
-                filter_dict_os['project_id'] = filter_dict_os.pop('tenant_id')
-            classification_dict = self.neutron.list_sfc_flow_classifiers(
-                **filter_dict_os)
-            classification_list = classification_dict["flow_classifiers"]
-            self.__classification_os2mano(classification_list)
-            return classification_list
-        except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
-                neExceptions.NeutronException, ConnectionError) as e:
-            self._format_exception(e)
-
-    def delete_classification(self, class_id):
-        self.logger.debug("Deleting Classification '%s' from VIM", class_id)
-        try:
-            self._reload_connection()
-            self.neutron.delete_sfc_flow_classifier(class_id)
-            return class_id
-        except (neExceptions.ConnectionFailed, neExceptions.NeutronException,
-                ksExceptions.ClientException, neExceptions.NeutronException,
-                ConnectionError) as e:
-            self._format_exception(e)
-
    def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
        """Create a Service Function Instance as a neutron SFC port pair.

        :param name: SFI name
        :param ingress_ports: list with exactly one ingress port VIM id
        :param egress_ports: list with exactly one egress port VIM id
        :param sfc_encap: when True the port pair is created with 'nsh' correlation
        :return: VIM id of the created port pair
        """
        self.logger.debug(
            "Adding a new Service Function Instance to VIM, named '%s'", name)
        try:
            new_sfi = None
            self._reload_connection()
            correlation = None
            if sfc_encap:
                correlation = 'nsh'
            # this connector supports exactly one ingress and one egress port per SFI
            if len(ingress_ports) != 1:
                raise vimconn.vimconnNotSupportedException(
                    "OpenStack VIM connector can only have "
                    "1 ingress port per SFI")
            if len(egress_ports) != 1:
                raise vimconn.vimconnNotSupportedException(
                    "OpenStack VIM connector can only have "
                    "1 egress port per SFI")
            sfi_dict = {'name': name,
                        'ingress': ingress_ports[0],
                        'egress': egress_ports[0],
                        'service_function_parameters': {
                            'correlation': correlation}}
            new_sfi = self.neutron.create_sfc_port_pair({'port_pair': sfi_dict})
            return new_sfi['port_pair']['id']
        except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
                neExceptions.NeutronException, ConnectionError) as e:
            # best-effort rollback: delete the port pair if it was created before the failure.
            # NOTE(review): new_sfi can only be non-None here if the exception fired after the
            # create call succeeded, so this branch is rarely reached — verify intent.
            if new_sfi:
                try:
                    self.neutron.delete_sfc_port_pair(
                        new_sfi['port_pair']['id'])
                except Exception:
                    self.logger.error(
                        'Creation of Service Function Instance failed, with '
                        'subsequent deletion failure as well.')
            self._format_exception(e)
-
-    def get_sfi(self, sfi_id):
-        self.logger.debug(
-            'Getting Service Function Instance %s from VIM', sfi_id)
-        filter_dict = {"id": sfi_id}
-        sfi_list = self.get_sfi_list(filter_dict)
-        if len(sfi_list) == 0:
-            raise vimconn.vimconnNotFoundException(
-                "Service Function Instance '{}' not found".format(sfi_id))
-        elif len(sfi_list) > 1:
-            raise vimconn.vimconnConflictException(
-                'Found more than one Service Function Instance '
-                'with this criteria')
-        sfi = sfi_list[0]
-        return sfi
-
-    def get_sfi_list(self, filter_dict={}):
-        self.logger.debug("Getting Service Function Instances from "
-                          "VIM filter: '%s'", str(filter_dict))
-        try:
-            self._reload_connection()
-            filter_dict_os = filter_dict.copy()
-            if self.api_version3 and "tenant_id" in filter_dict_os:
-                filter_dict_os['project_id'] = filter_dict_os.pop('tenant_id')
-            sfi_dict = self.neutron.list_sfc_port_pairs(**filter_dict_os)
-            sfi_list = sfi_dict["port_pairs"]
-            self.__sfi_os2mano(sfi_list)
-            return sfi_list
-        except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
-                neExceptions.NeutronException, ConnectionError) as e:
-            self._format_exception(e)
-
-    def delete_sfi(self, sfi_id):
-        self.logger.debug("Deleting Service Function Instance '%s' "
-                          "from VIM", sfi_id)
-        try:
-            self._reload_connection()
-            self.neutron.delete_sfc_port_pair(sfi_id)
-            return sfi_id
-        except (neExceptions.ConnectionFailed, neExceptions.NeutronException,
-                ksExceptions.ClientException, neExceptions.NeutronException,
-                ConnectionError) as e:
-            self._format_exception(e)
-
-    def new_sf(self, name, sfis, sfc_encap=True):
-        self.logger.debug("Adding a new Service Function to VIM, "
-                          "named '%s'", name)
-        try:
-            new_sf = None
-            self._reload_connection()
-            # correlation = None
-            # if sfc_encap:
-            #     correlation = 'nsh'
-            for instance in sfis:
-                sfi = self.get_sfi(instance)
-                if sfi.get('sfc_encap') != sfc_encap:
-                    raise vimconn.vimconnNotSupportedException(
-                        "OpenStack VIM connector requires all SFIs of the "
-                        "same SF to share the same SFC Encapsulation")
-            sf_dict = {'name': name,
-                       'port_pairs': sfis}
-            new_sf = self.neutron.create_sfc_port_pair_group({
-                'port_pair_group': sf_dict})
-            return new_sf['port_pair_group']['id']
-        except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
-                neExceptions.NeutronException, ConnectionError) as e:
-            if new_sf:
-                try:
-                    self.neutron.delete_sfc_port_pair_group(
-                        new_sf['port_pair_group']['id'])
-                except Exception:
-                    self.logger.error(
-                        'Creation of Service Function failed, with '
-                        'subsequent deletion failure as well.')
-            self._format_exception(e)
-
-    def get_sf(self, sf_id):
-        self.logger.debug("Getting Service Function %s from VIM", sf_id)
-        filter_dict = {"id": sf_id}
-        sf_list = self.get_sf_list(filter_dict)
-        if len(sf_list) == 0:
-            raise vimconn.vimconnNotFoundException(
-                "Service Function '{}' not found".format(sf_id))
-        elif len(sf_list) > 1:
-            raise vimconn.vimconnConflictException(
-                "Found more than one Service Function with this criteria")
-        sf = sf_list[0]
-        return sf
-
-    def get_sf_list(self, filter_dict={}):
-        self.logger.debug("Getting Service Function from VIM filter: '%s'",
-                          str(filter_dict))
-        try:
-            self._reload_connection()
-            filter_dict_os = filter_dict.copy()
-            if self.api_version3 and "tenant_id" in filter_dict_os:
-                filter_dict_os['project_id'] = filter_dict_os.pop('tenant_id')
-            sf_dict = self.neutron.list_sfc_port_pair_groups(**filter_dict_os)
-            sf_list = sf_dict["port_pair_groups"]
-            self.__sf_os2mano(sf_list)
-            return sf_list
-        except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
-                neExceptions.NeutronException, ConnectionError) as e:
-            self._format_exception(e)
-
-    def delete_sf(self, sf_id):
-        self.logger.debug("Deleting Service Function '%s' from VIM", sf_id)
-        try:
-            self._reload_connection()
-            self.neutron.delete_sfc_port_pair_group(sf_id)
-            return sf_id
-        except (neExceptions.ConnectionFailed, neExceptions.NeutronException,
-                ksExceptions.ClientException, neExceptions.NeutronException,
-                ConnectionError) as e:
-            self._format_exception(e)
-
-    def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
-        self.logger.debug("Adding a new Service Function Path to VIM, "
-                          "named '%s'", name)
-        try:
-            new_sfp = None
-            self._reload_connection()
-            # In networking-sfc the MPLS encapsulation is legacy
-            # should be used when no full SFC Encapsulation is intended
-            correlation = 'mpls'
-            if sfc_encap:
-                correlation = 'nsh'
-            sfp_dict = {'name': name,
-                        'flow_classifiers': classifications,
-                        'port_pair_groups': sfs,
-                        'chain_parameters': {'correlation': correlation}}
-            if spi:
-                sfp_dict['chain_id'] = spi
-            new_sfp = self.neutron.create_sfc_port_chain({'port_chain': sfp_dict})
-            return new_sfp["port_chain"]["id"]
-        except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
-                neExceptions.NeutronException, ConnectionError) as e:
-            if new_sfp:
-                try:
-                    self.neutron.delete_sfc_port_chain(new_sfp['port_chain']['id'])
-                except Exception:
-                    self.logger.error(
-                        'Creation of Service Function Path failed, with '
-                        'subsequent deletion failure as well.')
-            self._format_exception(e)
-
-    def get_sfp(self, sfp_id):
-        self.logger.debug(" Getting Service Function Path %s from VIM", sfp_id)
-        filter_dict = {"id": sfp_id}
-        sfp_list = self.get_sfp_list(filter_dict)
-        if len(sfp_list) == 0:
-            raise vimconn.vimconnNotFoundException(
-                "Service Function Path '{}' not found".format(sfp_id))
-        elif len(sfp_list) > 1:
-            raise vimconn.vimconnConflictException(
-                "Found more than one Service Function Path with this criteria")
-        sfp = sfp_list[0]
-        return sfp
-
-    def get_sfp_list(self, filter_dict={}):
-        self.logger.debug("Getting Service Function Paths from VIM filter: "
-                          "'%s'", str(filter_dict))
-        try:
-            self._reload_connection()
-            filter_dict_os = filter_dict.copy()
-            if self.api_version3 and "tenant_id" in filter_dict_os:
-                filter_dict_os['project_id'] = filter_dict_os.pop('tenant_id')
-            sfp_dict = self.neutron.list_sfc_port_chains(**filter_dict_os)
-            sfp_list = sfp_dict["port_chains"]
-            self.__sfp_os2mano(sfp_list)
-            return sfp_list
-        except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
-                neExceptions.NeutronException, ConnectionError) as e:
-            self._format_exception(e)
-
-    def delete_sfp(self, sfp_id):
-        self.logger.debug(
-            "Deleting Service Function Path '%s' from VIM", sfp_id)
-        try:
-            self._reload_connection()
-            self.neutron.delete_sfc_port_chain(sfp_id)
-            return sfp_id
-        except (neExceptions.ConnectionFailed, neExceptions.NeutronException,
-                ksExceptions.ClientException, neExceptions.NeutronException,
-                ConnectionError) as e:
-            self._format_exception(e)
diff --git a/osm_ro/vimconn_openvim.py b/osm_ro/vimconn_openvim.py
deleted file mode 100644 (file)
index 6f584c5..0000000
+++ /dev/null
@@ -1,1377 +0,0 @@
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-'''
-vimconnector implements all the methods to interact with openvim using the openvim API.
-'''
-__author__="Alfonso Tierno, Gerardo Garcia"
-__date__ ="$26-aug-2014 11:09:29$"
-
-import vimconn
-import requests
-import json
-import yaml
-import logging
-import math
-from openmano_schemas import id_schema, name_schema, nameshort_schema, description_schema, \
-                            vlan1000_schema, integer0_schema
-from jsonschema import validate as js_v, exceptions as js_e
-from urllib import quote
-
-'''contain the openvim virtual machine status to openmano status'''
-vmStatus2manoFormat={'ACTIVE':'ACTIVE',
-                     'PAUSED':'PAUSED',
-                     'SUSPENDED': 'SUSPENDED',
-                     'INACTIVE':'INACTIVE',
-                     'CREATING':'BUILD',
-                     'ERROR':'ERROR','DELETED':'DELETED'
-                     }
-netStatus2manoFormat={'ACTIVE':'ACTIVE','INACTIVE':'INACTIVE','BUILD':'BUILD','ERROR':'ERROR','DELETED':'DELETED', 'DOWN':'DOWN'
-                     }
-
-
-host_schema = {
-    "type":"object",
-    "properties":{
-        "id": id_schema,
-        "name": name_schema,
-    },
-    "required": ["id"]
-}
-image_schema = {
-    "type":"object",
-    "properties":{
-        "id": id_schema,
-        "name": name_schema,
-    },
-    "required": ["id","name"]
-}
-server_schema = {
-    "type":"object",
-    "properties":{
-        "id":id_schema,
-        "name": name_schema,
-    },
-    "required": ["id","name"]
-}
-new_host_response_schema = {
-    "title":"host response information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "host": host_schema
-    },
-    "required": ["host"],
-    "additionalProperties": False
-}
-
-get_images_response_schema = {
-    "title":"openvim images response information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "images":{
-            "type":"array",
-            "items": image_schema,
-        }
-    },
-    "required": ["images"],
-    "additionalProperties": False
-}
-
-get_hosts_response_schema = {
-    "title":"openvim hosts response information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "hosts":{
-            "type":"array",
-            "items": host_schema,
-        }
-    },
-    "required": ["hosts"],
-    "additionalProperties": False
-}
-
-get_host_detail_response_schema = new_host_response_schema # TODO: Content is not parsed yet
-
-get_server_response_schema = {
-    "title":"openvim server response information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "servers":{
-            "type":"array",
-            "items": server_schema,
-        }
-    },
-    "required": ["servers"],
-    "additionalProperties": False
-}
-
-new_tenant_response_schema = {
-    "title":"tenant response information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "tenant":{
-            "type":"object",
-            "properties":{
-                "id": id_schema,
-                "name": nameshort_schema,
-                "description":description_schema,
-                "enabled":{"type" : "boolean"}
-            },
-            "required": ["id"]
-        }
-    },
-    "required": ["tenant"],
-    "additionalProperties": False
-}
-
-new_network_response_schema = {
-    "title":"network response information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "network":{
-            "type":"object",
-            "properties":{
-                "id":id_schema,
-                "name":name_schema,
-                "type":{"type":"string", "enum":["bridge_man","bridge_data","data", "ptp"]},
-                "shared":{"type":"boolean"},
-                "tenant_id":id_schema,
-                "admin_state_up":{"type":"boolean"},
-                "vlan":vlan1000_schema
-            },
-            "required": ["id"]
-        }
-    },
-    "required": ["network"],
-    "additionalProperties": False
-}
-
-
-# get_network_response_schema = {
-#     "title":"get network response information schema",
-#     "$schema": "http://json-schema.org/draft-04/schema#",
-#     "type":"object",
-#     "properties":{
-#         "network":{
-#             "type":"object",
-#             "properties":{
-#                 "id":id_schema,
-#                 "name":name_schema,
-#                 "type":{"type":"string", "enum":["bridge_man","bridge_data","data", "ptp"]},
-#                 "shared":{"type":"boolean"},
-#                 "tenant_id":id_schema,
-#                 "admin_state_up":{"type":"boolean"},
-#                 "vlan":vlan1000_schema
-#             },
-#             "required": ["id"]
-#         }
-#     },
-#     "required": ["network"],
-#     "additionalProperties": False
-# }
-
-
-new_port_response_schema = {
-    "title":"port response information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "port":{
-            "type":"object",
-            "properties":{
-                "id":id_schema,
-            },
-            "required": ["id"]
-        }
-    },
-    "required": ["port"],
-    "additionalProperties": False
-}
-
-get_flavor_response_schema = {
-    "title":"openvim flavors response information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "flavor":{
-            "type":"object",
-            "properties":{
-                "id":   id_schema,
-                "name": name_schema,
-                "extended": {"type":"object"},
-            },
-            "required": ["id", "name"],
-        }
-    },
-    "required": ["flavor"],
-    "additionalProperties": False
-}
-
-new_flavor_response_schema = {
-    "title":"flavor response information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "flavor":{
-            "type":"object",
-            "properties":{
-                "id":id_schema,
-            },
-            "required": ["id"]
-        }
-    },
-    "required": ["flavor"],
-    "additionalProperties": False
-}
-
-get_image_response_schema = {
-    "title":"openvim images response information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "image":{
-            "type":"object",
-            "properties":{
-                "id":   id_schema,
-                "name": name_schema,
-            },
-            "required": ["id", "name"],
-        }
-    },
-    "required": ["flavor"],
-    "additionalProperties": False
-}
-new_image_response_schema = {
-    "title":"image response information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "image":{
-            "type":"object",
-            "properties":{
-                "id":id_schema,
-            },
-            "required": ["id"]
-        }
-    },
-    "required": ["image"],
-    "additionalProperties": False
-}
-
-new_vminstance_response_schema = {
-    "title":"server response information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "server":{
-            "type":"object",
-            "properties":{
-                "id":id_schema,
-            },
-            "required": ["id"]
-        }
-    },
-    "required": ["server"],
-    "additionalProperties": False
-}
-
-get_processor_rankings_response_schema = {
-    "title":"processor rankings information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "rankings":{
-            "type":"array",
-            "items":{
-                "type":"object",
-                "properties":{
-                    "model": description_schema,
-                    "value": integer0_schema
-                },
-                "additionalProperties": False,
-                "required": ["model","value"]
-            }
-        },
-        "additionalProperties": False,
-        "required": ["rankings"]
-    }
-}
-
-class vimconnector(vimconn.vimconnector):
-    def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None,
-                 log_level="DEBUG", config={}, persistent_info={}):
-        vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level, config)
-        self.tenant = None
-        self.headers_req = {'content-type': 'application/json'}
-        self.logger = logging.getLogger('openmano.vim.openvim')
-        self.persistent_info = persistent_info
-        if tenant_id:
-            self.tenant = tenant_id
-
-    def __setitem__(self,index, value):
-        '''Set individuals parameters 
-        Throw TypeError, KeyError
-        '''
-        if index=='tenant_id':
-            self.tenant = value
-        elif index=='tenant_name':
-            self.tenant = None
-        vimconn.vimconnector.__setitem__(self,index, value)    
-
-    def _get_my_tenant(self):
-        '''Obtain uuid of my tenant from name
-        '''
-        if self.tenant:
-            return self.tenant
-
-        url = self.url+'/tenants?name='+ quote(self.tenant_name)
-        self.logger.info("Getting VIM tenant_id GET %s", url)
-        vim_response = requests.get(url, headers = self.headers_req)
-        self._check_http_request_response(vim_response)
-        try:
-            tenant_list = vim_response.json()["tenants"]
-            if len(tenant_list) == 0:
-                raise vimconn.vimconnNotFoundException("No tenant found for name '%s'" % str(self.tenant_name))
-            elif len(tenant_list) > 1:
-                raise vimconn.vimconnConflictException ("More that one tenant found for name '%s'" % str(self.tenant_name))
-            self.tenant = tenant_list[0]["id"]
-            return self.tenant
-        except Exception as e:
-            raise vimconn.vimconnUnexpectedResponse("Get VIM tenant {} '{}'".format(type(e).__name__, str(e)))
-
-    def _format_jsonerror(self,http_response):
-        #DEPRECATED, to delete in the future
-        try:
-            data = http_response.json()
-            return data["error"]["description"]
-        except:
-            return http_response.text
-
-    def _format_in(self, http_response, schema):
-        #DEPRECATED, to delete in the future
-        try:
-            client_data = http_response.json()
-            js_v(client_data, schema)
-            #print "Input data: ", str(client_data)
-            return True, client_data
-        except js_e.ValidationError as exc:
-            print "validate_in error, jsonschema exception ", exc.message, "at", exc.path
-            return False, ("validate_in error, jsonschema exception ", exc.message, "at", exc.path)
-    
-    def _remove_extra_items(self, data, schema):
-        deleted=[]
-        if type(data) is tuple or type(data) is list:
-            for d in data:
-                a= self._remove_extra_items(d, schema['items'])
-                if a is not None: deleted.append(a)
-        elif type(data) is dict:
-            for k in data.keys():
-                if 'properties' not in schema or k not in schema['properties'].keys():
-                    del data[k]
-                    deleted.append(k)
-                else:
-                    a = self._remove_extra_items(data[k], schema['properties'][k])
-                    if a is not None:  deleted.append({k:a})
-        if len(deleted) == 0: return None
-        elif len(deleted) == 1: return deleted[0]
-        else: return deleted
-        
-    def _format_request_exception(self, request_exception):
-        '''Transform a request exception into a vimconn exception'''
-        if isinstance(request_exception, js_e.ValidationError):
-            raise vimconn.vimconnUnexpectedResponse("jsonschema exception '{}' at '{}'".format(request_exception.message, request_exception.path))            
-        elif isinstance(request_exception, requests.exceptions.HTTPError):
-            raise vimconn.vimconnUnexpectedResponse(type(request_exception).__name__ + ": " + str(request_exception))
-        else:
-            raise vimconn.vimconnConnectionException(type(request_exception).__name__ + ": " + str(request_exception))
-
-    def _check_http_request_response(self, request_response):
-        '''Raise a vimconn exception if the response is not Ok'''
-        if request_response.status_code >= 200 and  request_response.status_code < 300:
-            return
-        if request_response.status_code == vimconn.HTTP_Unauthorized:
-            raise vimconn.vimconnAuthException(request_response.text)
-        elif request_response.status_code == vimconn.HTTP_Not_Found:
-            raise vimconn.vimconnNotFoundException(request_response.text)
-        elif request_response.status_code == vimconn.HTTP_Conflict:
-            raise vimconn.vimconnConflictException(request_response.text)
-        else: 
-            raise vimconn.vimconnUnexpectedResponse("VIM HTTP_response {}, {}".format(request_response.status_code, str(request_response.text)))
-
-    def new_tenant(self,tenant_name,tenant_description):
-        '''Adds a new tenant to VIM with this name and description, returns the tenant identifier'''
-        #print "VIMConnector: Adding a new tenant to VIM"
-        payload_dict = {"tenant": {"name":tenant_name,"description": tenant_description, "enabled": True}}
-        payload_req = json.dumps(payload_dict)
-        try:
-            url = self.url_admin+'/tenants'
-            self.logger.info("Adding a new tenant %s", url)
-            vim_response = requests.post(url, headers = self.headers_req, data=payload_req)
-            self._check_http_request_response(vim_response)
-            self.logger.debug(vim_response.text)
-            #print json.dumps(vim_response.json(), indent=4)
-            response = vim_response.json()
-            js_v(response, new_tenant_response_schema)
-            #r = self._remove_extra_items(response, new_tenant_response_schema)
-            #if r is not None: 
-            #    self.logger.warn("Warning: remove extra items %s", str(r))
-            tenant_id = response['tenant']['id']
-            return tenant_id
-        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
-            self._format_request_exception(e)
-
-    def delete_tenant(self,tenant_id):
-        '''Delete a tenant from VIM. Returns the old tenant identifier'''
-        try:
-            url = self.url_admin+'/tenants/'+tenant_id
-            self.logger.info("Delete a tenant DELETE %s", url)
-            vim_response = requests.delete(url, headers = self.headers_req)
-            self._check_http_request_response(vim_response)
-            self.logger.debug(vim_response.text)
-            #print json.dumps(vim_response.json(), indent=4)
-            return tenant_id
-        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
-            self._format_request_exception(e)
-
-    def get_tenant_list(self, filter_dict={}):
-        '''Obtain tenants of VIM
-        filter_dict can contain the following keys:
-            name: filter by tenant name
-            id: filter by tenant uuid/id
-            <other VIM specific>
-        Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
-        '''
-        filterquery=[]
-        filterquery_text=''
-        for k,v in filter_dict.iteritems():
-            filterquery.append(str(k)+'='+str(v))
-        if len(filterquery)>0:
-            filterquery_text='?'+ '&'.join(filterquery)
-        try:
-            url = self.url+'/tenants'+filterquery_text
-            self.logger.info("get_tenant_list GET %s", url)
-            vim_response = requests.get(url, headers = self.headers_req)
-            self._check_http_request_response(vim_response)
-            self.logger.debug(vim_response.text)
-            #print json.dumps(vim_response.json(), indent=4)
-            return vim_response.json()["tenants"]
-        except requests.exceptions.RequestException as e:
-            self._format_request_exception(e)
-
-    def new_network(self,net_name, net_type, ip_profile=None, shared=False, vlan=None): #, **vim_specific):
-        """Adds a tenant network to VIM
-        Params:
-            'net_name': name of the network
-            'net_type': one of:
-                'bridge': overlay isolated network
-                'data':   underlay E-LAN network for Passthrough and SRIOV interfaces
-                'ptp':    underlay E-LINE network for Passthrough and SRIOV interfaces.
-            'ip_profile': is a dict containing the IP parameters of the network
-                'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
-                'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
-                'gateway_address': (Optional) ip_schema, that is X.X.X.X
-                'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
-                'dhcp_enabled': True or False
-                'dhcp_start_address': ip_schema, first IP to grant
-                'dhcp_count': number of IPs to grant.
-            'shared': if this network can be seen/use by other tenants/organization
-            'vlan': in case of a data or ptp net_type, the intended vlan tag to be used for the network
-        Returns a tuple with the network identifier and created_items, or raises an exception on error
-            created_items can be None or a dictionary where this method can include key-values that will be passed to
-            the method delete_network. Can be used to store created segments, created l2gw connections, etc.
-            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
-            as not present.
-        """
-        try:
-            created_items = {}
-            self._get_my_tenant()
-            if net_type=="bridge":
-                net_type="bridge_data"
-            payload_req = {"name": net_name, "type": net_type, "tenant_id": self.tenant, "shared": shared}
-            if vlan:
-                payload_req["provider:vlan"] = vlan
-            # payload_req.update(vim_specific)
-            url = self.url+'/networks'
-            self.logger.info("Adding a new network POST: %s  DATA: %s", url, str(payload_req))
-            vim_response = requests.post(url, headers = self.headers_req, data=json.dumps({"network": payload_req}) )
-            self._check_http_request_response(vim_response)
-            self.logger.debug(vim_response.text)
-            #print json.dumps(vim_response.json(), indent=4)
-            response = vim_response.json()
-            js_v(response, new_network_response_schema)
-            #r = self._remove_extra_items(response, new_network_response_schema)
-            #if r is not None: 
-            #    self.logger.warn("Warning: remove extra items %s", str(r))
-            network_id = response['network']['id']
-            return network_id, created_items
-        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
-            self._format_request_exception(e)
-        
-    def get_network_list(self, filter_dict={}):
-        '''Obtain tenant networks of VIM
-        Filter_dict can be:
-            name: network name
-            id: network uuid
-            public: boolean
-            tenant_id: tenant
-            admin_state_up: boolean
-            status: 'ACTIVE'
-        Returns the network list of dictionaries
-        '''
-        try:
-            if 'tenant_id' not in filter_dict:
-                filter_dict["tenant_id"] = self._get_my_tenant()
-            elif not filter_dict["tenant_id"]:
-                del filter_dict["tenant_id"]
-            filterquery=[]
-            filterquery_text=''
-            for k,v in filter_dict.iteritems():
-                filterquery.append(str(k)+'='+str(v))
-            if len(filterquery)>0:
-                filterquery_text='?'+ '&'.join(filterquery)
-            url = self.url+'/networks'+filterquery_text
-            self.logger.info("Getting network list GET %s", url)
-            vim_response = requests.get(url, headers = self.headers_req)
-            self._check_http_request_response(vim_response)
-            self.logger.debug(vim_response.text)
-            #print json.dumps(vim_response.json(), indent=4)
-            response = vim_response.json()
-            return response['networks']
-        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
-            self._format_request_exception(e)
-
-    def get_network(self, net_id):
-        '''Obtain network details of network id'''
-        try:
-            url = self.url+'/networks/'+net_id
-            self.logger.info("Getting network GET %s", url)
-            vim_response = requests.get(url, headers = self.headers_req)
-            self._check_http_request_response(vim_response)
-            self.logger.debug(vim_response.text)
-            #print json.dumps(vim_response.json(), indent=4)
-            response = vim_response.json()
-            return response['network']
-        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
-            self._format_request_exception(e)
-            
-    def delete_network(self, net_id, created_items=None):
-        """
-        Removes a tenant network from VIM and its associated elements
-        :param net_id: VIM identifier of the network, provided by method new_network
-        :param created_items: dictionary with extra items to be deleted. provided by method new_network
-        Returns the network identifier or raises an exception upon error or when network is not found
-        """
-        try:
-            self._get_my_tenant()
-            url = self.url+'/networks/'+net_id
-            self.logger.info("Deleting VIM network DELETE %s", url)
-            vim_response = requests.delete(url, headers=self.headers_req)
-            self._check_http_request_response(vim_response)
-            #self.logger.debug(vim_response.text)
-            #print json.dumps(vim_response.json(), indent=4)
-            return net_id
-        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
-            self._format_request_exception(e)
-
-    def get_flavor(self, flavor_id):
-        '''Obtain flavor details from the  VIM'''
-        try:
-            self._get_my_tenant()
-            url = self.url+'/'+self.tenant+'/flavors/'+flavor_id
-            self.logger.info("Getting flavor GET %s", url)
-            vim_response = requests.get(url, headers = self.headers_req)
-            self._check_http_request_response(vim_response)
-            self.logger.debug(vim_response.text)
-            #print json.dumps(vim_response.json(), indent=4)
-            response = vim_response.json()
-            js_v(response, get_flavor_response_schema)
-            r = self._remove_extra_items(response, get_flavor_response_schema)
-            if r is not None: 
-                self.logger.warn("Warning: remove extra items %s", str(r))
-            return response['flavor']
-        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
-            self._format_request_exception(e)
-        
-    def new_flavor(self, flavor_data):
-        '''Adds a tenant flavor to VIM'''
-        '''Returns the flavor identifier'''
-        try:
-            new_flavor_dict = flavor_data.copy()
-            for device in new_flavor_dict.get('extended', {}).get('devices', ()):
-                if 'image name' in device:
-                    del device['image name']
-                if 'name' in device:
-                    del device['name']
-            numas = new_flavor_dict.get('extended', {}).get('numas')
-            if numas:
-                numa = numas[0]
-                # translate memory, cpus to EPA
-                if "cores" not in numa and "threads" not in numa and "paired-threads" not in numa:
-                    numa["paired-threads"] = new_flavor_dict["vcpus"]
-                if "memory" not in numa:
-                    numa["memory"] = int(math.ceil(new_flavor_dict["ram"]/1024.0))
-                for iface in numa.get("interfaces", ()):
-                    if not iface.get("bandwidth"):
-                        iface["bandwidth"] = "1 Mbps"
-
-            new_flavor_dict["name"] = flavor_data["name"][:64]
-            self._get_my_tenant()
-            payload_req = json.dumps({'flavor': new_flavor_dict})
-            url = self.url+'/'+self.tenant+'/flavors'
-            self.logger.info("Adding a new VIM flavor POST %s", url)
-            vim_response = requests.post(url, headers = self.headers_req, data=payload_req)
-            self._check_http_request_response(vim_response)
-            self.logger.debug(vim_response.text)
-            #print json.dumps(vim_response.json(), indent=4)
-            response = vim_response.json()
-            js_v(response, new_flavor_response_schema)
-            r = self._remove_extra_items(response, new_flavor_response_schema)
-            if r is not None: 
-                self.logger.warn("Warning: remove extra items %s", str(r))
-            flavor_id = response['flavor']['id']
-            return flavor_id
-        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
-            self._format_request_exception(e)
-
-    def delete_flavor(self,flavor_id):
-        '''Deletes a tenant flavor from VIM'''
-        '''Returns the old flavor_id'''
-        try:
-            self._get_my_tenant()
-            url = self.url+'/'+self.tenant+'/flavors/'+flavor_id
-            self.logger.info("Deleting VIM flavor DELETE %s", url)
-            vim_response = requests.delete(url, headers=self.headers_req)
-            self._check_http_request_response(vim_response)
-            #self.logger.debug(vim_response.text)
-            #print json.dumps(vim_response.json(), indent=4)
-            return flavor_id
-        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
-            self._format_request_exception(e)
-
-    def get_image(self, image_id):
-        '''Obtain image details from the  VIM'''
-        try:
-            self._get_my_tenant()
-            url = self.url+'/'+self.tenant+'/images/'+image_id
-            self.logger.info("Getting image GET %s", url)
-            vim_response = requests.get(url, headers = self.headers_req)
-            self._check_http_request_response(vim_response)
-            self.logger.debug(vim_response.text)
-            #print json.dumps(vim_response.json(), indent=4)
-            response = vim_response.json()
-            js_v(response, get_image_response_schema)
-            r = self._remove_extra_items(response, get_image_response_schema)
-            if r is not None: 
-                self.logger.warn("Warning: remove extra items %s", str(r))
-            return response['image']
-        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
-            self._format_request_exception(e)
-
-    def new_image(self,image_dict):
-        ''' Adds a tenant image to VIM, returns image_id'''
-        try:
-            self._get_my_tenant()
-            new_image_dict={'name': image_dict['name'][:64]}
-            if image_dict.get('description'):
-                new_image_dict['description'] = image_dict['description']
-            if image_dict.get('metadata'):
-                new_image_dict['metadata'] = yaml.load(image_dict['metadata'])
-            if image_dict.get('location'):
-                new_image_dict['path'] = image_dict['location']
-            payload_req = json.dumps({"image":new_image_dict})
-            url=self.url + '/' + self.tenant + '/images'
-            self.logger.info("Adding a new VIM image POST %s", url)
-            vim_response = requests.post(url, headers = self.headers_req, data=payload_req)
-            self._check_http_request_response(vim_response)
-            self.logger.debug(vim_response.text)
-            #print json.dumps(vim_response.json(), indent=4)
-            response = vim_response.json()
-            js_v(response, new_image_response_schema)
-            r = self._remove_extra_items(response, new_image_response_schema)
-            if r is not None: 
-                self.logger.warn("Warning: remove extra items %s", str(r))
-            image_id = response['image']['id']
-            return image_id
-        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
-            self._format_request_exception(e)
-            
-    def delete_image(self, image_id):
-        '''Deletes a tenant image from VIM'''
-        '''Returns the deleted image_id'''
-        try:
-            self._get_my_tenant()
-            url = self.url + '/'+ self.tenant +'/images/'+image_id
-            self.logger.info("Deleting VIM image DELETE %s", url)
-            vim_response = requests.delete(url, headers=self.headers_req)
-            self._check_http_request_response(vim_response)
-            #self.logger.debug(vim_response.text)
-            #print json.dumps(vim_response.json(), indent=4)
-            return image_id
-        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
-            self._format_request_exception(e)
-
-    def get_image_id_from_path(self, path):
-        '''Get the image id from image path in the VIM database. Returns the image_id'''
-        try:
-            self._get_my_tenant()
-            url=self.url + '/' + self.tenant + '/images?path='+quote(path)
-            self.logger.info("Getting images GET %s", url)
-            vim_response = requests.get(url)
-            self._check_http_request_response(vim_response)
-            self.logger.debug(vim_response.text)
-            #print json.dumps(vim_response.json(), indent=4)
-            response = vim_response.json()
-            js_v(response, get_images_response_schema)
-            #r = self._remove_extra_items(response, get_images_response_schema)
-            #if r is not None: 
-            #    self.logger.warn("Warning: remove extra items %s", str(r))
-            if len(response['images'])==0:
-                raise vimconn.vimconnNotFoundException("Image not found at VIM with path '%s'", path)
-            elif len(response['images'])>1:
-                raise vimconn.vimconnConflictException("More than one image found at VIM with path '%s'", path)
-            return response['images'][0]['id']
-        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
-            self._format_request_exception(e)
-
-    def get_image_list(self, filter_dict={}):
-        '''Obtain tenant images from VIM
-        Filter_dict can be:
-            name: image name
-            id: image uuid
-            checksum: image checksum
-            location: image path
-        Returns the image list of dictionaries:
-            [{<the fields at Filter_dict plus some VIM specific>}, ...]
-            List can be empty
-        '''
-        try:
-            self._get_my_tenant()
-            filterquery=[]
-            filterquery_text=''
-            for k,v in filter_dict.iteritems():
-                filterquery.append(str(k)+'='+str(v))
-            if len(filterquery)>0:
-                filterquery_text='?'+ '&'.join(filterquery)
-            url = self.url+'/'+self.tenant+'/images'+filterquery_text
-            self.logger.info("Getting image list GET %s", url)
-            vim_response = requests.get(url, headers = self.headers_req)
-            self._check_http_request_response(vim_response)
-            self.logger.debug(vim_response.text)
-            #print json.dumps(vim_response.json(), indent=4)
-            response = vim_response.json()
-            return response['images']
-        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
-            self._format_request_exception(e)
-
-    def new_vminstancefromJSON(self, vm_data):
-        '''Adds a VM instance to VIM'''
-        '''Returns the instance identifier'''
-        try:
-            self._get_my_tenant()
-        except Exception as e:
-            return -vimconn.HTTP_Not_Found, str(e)
-        print "VIMConnector: Adding a new VM instance from JSON to VIM"
-        payload_req = vm_data
-        try:
-            vim_response = requests.post(self.url+'/'+self.tenant+'/servers', headers = self.headers_req, data=payload_req)
-        except requests.exceptions.RequestException as e:
-            print "new_vminstancefromJSON Exception: ", e.args
-            return -vimconn.HTTP_Not_Found, str(e.args[0])
-        print vim_response
-        #print vim_response.status_code
-        if vim_response.status_code == 200:
-            #print vim_response.json()
-            #print json.dumps(vim_response.json(), indent=4)
-            res,http_content = self._format_in(vim_response, new_image_response_schema)
-            #print http_content
-            if res:
-                r = self._remove_extra_items(http_content, new_image_response_schema)
-                if r is not None: print "Warning: remove extra items ", r
-                #print http_content
-                vminstance_id = http_content['server']['id']
-                print "Tenant image id: ",vminstance_id
-                return vim_response.status_code,vminstance_id
-            else: return -vimconn.HTTP_Bad_Request,http_content
-        else:
-            #print vim_response.text
-            jsonerror = self._format_jsonerror(vim_response)
-            text = 'Error in VIM "%s": not possible to add new vm instance. HTTP Response: %d. Error: %s' % (self.url, vim_response.status_code, jsonerror)
-            #print text
-            return -vim_response.status_code,text
-
-    def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None, disk_list=None,
-                       availability_zone_index=None, availability_zone_list=None):
-        """Adds a VM instance to VIM
-        Params:
-            start: indicates if VM must start or boot in pause mode. Ignored
-            image_id,flavor_id: image and flavor uuid
-            net_list: list of interfaces, each one is a dictionary with:
-                name:
-                net_id: network uuid to connect
-                vpci: virtual vcpi to assign
-                model: interface model, virtio, e1000, ...
-                mac_address: 
-                use: 'data', 'bridge',  'mgmt'
-                type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
-                vim_id: filled/added by this function
-                #TODO ip, security groups
-        Returns a tuple with the instance identifier and created_items or raises an exception on error
-            created_items can be None or a dictionary where this method can include key-values that will be passed to
-            the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
-            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
-            as not present.
-        """
-        self.logger.debug("new_vminstance input: image='%s' flavor='%s' nics='%s'", image_id, flavor_id, str(net_list))
-        try:
-            self._get_my_tenant()
-#            net_list = []
-#            for k,v in net_dict.items():
-#                print k,v
-#                net_list.append('{"name":"' + k + '", "uuid":"' + v + '"}')
-#            net_list_string = ', '.join(net_list) 
-            virtio_net_list=[]
-            for net in net_list:
-                if not net.get("net_id"):
-                    continue
-                net_dict = {'uuid': net["net_id"]}
-                if net.get("type"):
-                    if net["type"] == "SR-IOV":
-                        net_dict["type"] = "VF"
-                    elif net["type"] == "PCI-PASSTHROUGH":
-                        net_dict["type"] = "PF"
-                    else:
-                        net_dict["type"] = net["type"]
-                if net.get("name"):
-                    net_dict["name"] = net["name"]
-                if net.get("vpci"):
-                    net_dict["vpci"] = net["vpci"]
-                if net.get("model"):
-                    if net["model"] == "VIRTIO" or net["model"] == "paravirt":
-                        net_dict["model"] = "virtio"
-                    else:
-                        net_dict["model"] = net["model"]
-                if net.get("mac_address"):
-                    net_dict["mac_address"] = net["mac_address"]
-                if net.get("ip_address"):
-                    net_dict["ip_address"] = net["ip_address"]
-                virtio_net_list.append(net_dict)
-            payload_dict={  "name":        name[:64],
-                            "description": description,
-                            "imageRef":    image_id,
-                            "flavorRef":   flavor_id,
-                            "networks": virtio_net_list
-                        }
-            if start != None:
-                payload_dict["start"] = start
-            payload_req = json.dumps({"server": payload_dict})
-            url = self.url+'/'+self.tenant+'/servers'
-            self.logger.info("Adding a new vm POST %s DATA %s", url, payload_req)
-            vim_response = requests.post(url, headers = self.headers_req, data=payload_req)
-            self._check_http_request_response(vim_response)
-            self.logger.debug(vim_response.text)
-            #print json.dumps(vim_response.json(), indent=4)
-            response = vim_response.json()
-            js_v(response, new_vminstance_response_schema)
-            #r = self._remove_extra_items(response, new_vminstance_response_schema)
-            #if r is not None: 
-            #    self.logger.warn("Warning: remove extra items %s", str(r))
-            vminstance_id = response['server']['id']
-
-            #connect data plane interfaces to network
-            for net in net_list:
-                if net["type"]=="virtual":
-                    if not net.get("net_id"):
-                        continue
-                    for iface in response['server']['networks']:
-                        if "name" in net:
-                            if net["name"]==iface["name"]:
-                                net["vim_id"] = iface['iface_id']
-                                break
-                        elif "net_id" in net:
-                            if net["net_id"]==iface["net_id"]:
-                                net["vim_id"] = iface['iface_id']
-                                break
-                else: #dataplane
-                    for numa in response['server'].get('extended',{}).get('numas',() ):
-                        for iface in numa.get('interfaces',() ):
-                            if net['name'] == iface['name']:
-                                net['vim_id'] = iface['iface_id']
-                                #Code bellow is not needed, current openvim connect dataplane interfaces 
-                                #if net.get("net_id"):
-                                ##connect dataplane interface
-                                #    result, port_id = self.connect_port_network(iface['iface_id'], net["net_id"])
-                                #    if result < 0:
-                                #        error_text = "Error attaching port %s to network %s: %s." % (iface['iface_id'], net["net_id"], port_id)
-                                #        print "new_vminstance: " + error_text
-                                #        self.delete_vminstance(vminstance_id)
-                                #        return result, error_text
-                                break
-        
-            return vminstance_id, None
-        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
-            self._format_request_exception(e)
-        
-    def get_vminstance(self, vm_id):
-        '''Returns the VM instance information from VIM'''
-        try:
-            self._get_my_tenant()
-            url = self.url+'/'+self.tenant+'/servers/'+vm_id
-            self.logger.info("Getting vm GET %s", url)
-            vim_response = requests.get(url, headers = self.headers_req)
-            vim_response = requests.get(url, headers = self.headers_req)
-            self._check_http_request_response(vim_response)
-            self.logger.debug(vim_response.text)
-            #print json.dumps(vim_response.json(), indent=4)
-            response = vim_response.json()
-            js_v(response, new_vminstance_response_schema)
-            #r = self._remove_extra_items(response, new_vminstance_response_schema)
-            #if r is not None: 
-            #    self.logger.warn("Warning: remove extra items %s", str(r))
-            return response['server']
-        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
-            self._format_request_exception(e)
-        
-    def delete_vminstance(self, vm_id, created_items=None):
-        '''Removes a VM instance from VIM, returns the deleted vm_id'''
-        try:
-            self._get_my_tenant()
-            url = self.url+'/'+self.tenant+'/servers/'+vm_id
-            self.logger.info("Deleting VIM vm DELETE %s", url)
-            vim_response = requests.delete(url, headers=self.headers_req)
-            self._check_http_request_response(vim_response)
-            #self.logger.debug(vim_response.text)
-            #print json.dumps(vim_response.json(), indent=4)
-            return vm_id
-        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
-            self._format_request_exception(e)
-
    def refresh_vms_status(self, vm_list):
        '''Refreshes the status of the virtual machines
        :param vm_list: list of VIM vm identifiers
        :returns: dictionary keyed by vm_id with, per vm: status (MANO format),
            optional error_msg, vim_info (yaml dump) and interfaces (port list)
        '''
        try:
            self._get_my_tenant()
        except requests.exceptions.RequestException as e:
            self._format_request_exception(e)
        vm_dict={}
        for vm_id in vm_list:
            vm={}
            try:
                url = self.url + '/' + self.tenant + '/servers/' + vm_id
                self.logger.info("Getting vm GET %s", url)
                vim_response = requests.get(url, headers = self.headers_req)
                self._check_http_request_response(vim_response)
                response = vim_response.json()
                js_v(response, new_vminstance_response_schema)
                # translate the VIM status to MANO format, or report OTHER
                if response['server']['status'] in vmStatus2manoFormat:
                    vm['status'] = vmStatus2manoFormat[ response['server']['status']  ]
                else:
                    vm['status'] = "OTHER"
                    vm['error_msg'] = "VIM status reported " + response['server']['status']
                if response['server'].get('last_error'):
                    vm['error_msg'] = response['server']['last_error']
                vm["vim_info"] = yaml.safe_dump(response['server'])
                # get interfaces info: one entry per port attached to this vm
                try:
                    management_ip = False
                    url2 = self.url + '/ports?device_id=' + quote(vm_id)
                    self.logger.info("Getting PORTS GET %s", url2)
                    vim_response2 = requests.get(url2, headers = self.headers_req)
                    self._check_http_request_response(vim_response2)
                    client_data = vim_response2.json()
                    if isinstance(client_data.get("ports"), list):
                        vm["interfaces"]=[]
                    for port in client_data.get("ports"):
                        interface={}
                        interface['vim_info'] = yaml.safe_dump(port)
                        interface["mac_address"] = port.get("mac_address")
                        interface["vim_net_id"] = port.get("network_id")
                        interface["vim_interface_id"] = port["id"]
                        interface["ip_address"] = port.get("ip_address")
                        if interface["ip_address"]:
                            management_ip = True
                        # 0.0.0.0 is the VIM's "no address" placeholder
                        if interface["ip_address"] == "0.0.0.0":
                            interface["ip_address"] = None
                        vm["interfaces"].append(interface)

                except Exception as e:
                    # port query failures are logged but do not fail the vm status
                    self.logger.error("refresh_vms_and_nets. Port get %s: %s", type(e).__name__, str(e))

                # flag an ACTIVE vm without any IP so callers can keep waiting
                if vm['status'] == "ACTIVE" and not management_ip:
                    vm['status'] = "ACTIVE:NoMgmtIP"

            except vimconn.vimconnNotFoundException as e:
                # vm no longer exists at the VIM
                self.logger.error("Exception getting vm status: %s", str(e))
                vm['status'] = "DELETED"
                vm['error_msg'] = str(e)
            except (requests.exceptions.RequestException, js_e.ValidationError, vimconn.vimconnException) as e:
                # connectivity or VIM-side errors
                self.logger.error("Exception getting vm status: %s", str(e))
                vm['status'] = "VIM_ERROR"
                vm['error_msg'] = str(e)
            vm_dict[vm_id] = vm
        return vm_dict
-
    def refresh_nets_status(self, net_list):
        '''Get the status of the networks
           Params: the list of network identifiers
           Returns a dictionary with:
                net_id:         #VIM id of this network
                    status:     #Mandatory. Text with one of:
                                #  DELETED (not found at vim)
                                #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...) 
                                #  OTHER (Vim reported other status not understood)
                                #  ERROR (VIM indicates an ERROR status)
                                #  ACTIVE, INACTIVE, DOWN (admin down), 
                                #  BUILD (on building process)
                                #
                    error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR 
                    vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)

        '''
        try:
            self._get_my_tenant()
        except requests.exceptions.RequestException as e:
            self._format_request_exception(e)

        net_dict={}
        for net_id in net_list:
            net = {}
            try:
                # query the VIM and translate its status to MANO format
                net_vim = self.get_network(net_id)
                if net_vim['status'] in netStatus2manoFormat:
                    net["status"] = netStatus2manoFormat[ net_vim['status'] ]
                else:
                    net["status"] = "OTHER"
                    net["error_msg"] = "VIM status reported " + net_vim['status']

                # an administratively disabled network is DOWN even if ACTIVE
                if net["status"] == "ACTIVE" and not net_vim['admin_state_up']:
                    net["status"] = "DOWN"
                if net_vim.get('last_error'):
                    net['error_msg'] = net_vim['last_error']
                net["vim_info"] = yaml.safe_dump(net_vim)
            except vimconn.vimconnNotFoundException as e:
                # network no longer exists at the VIM
                self.logger.error("Exception getting net status: %s", str(e))
                net['status'] = "DELETED"
                net['error_msg'] = str(e)
            except (requests.exceptions.RequestException, js_e.ValidationError, vimconn.vimconnException) as e:
                # connectivity or VIM-side errors
                self.logger.error("Exception getting net status: %s", str(e))
                net['status'] = "VIM_ERROR"
                net['error_msg'] = str(e)
            net_dict[net_id] = net
        return net_dict
-    
-    def action_vminstance(self, vm_id, action_dict, created_items={}):
-        '''Send and action over a VM instance from VIM'''
-        '''Returns the status'''
-        try:
-            self._get_my_tenant()
-            if "console" in action_dict:
-                raise vimconn.vimconnException("getting console is not available at openvim", http_code=vimconn.HTTP_Service_Unavailable)
-            url = self.url+'/'+self.tenant+'/servers/'+vm_id+"/action"
-            self.logger.info("Action over VM instance POST %s", url)
-            vim_response = requests.post(url, headers = self.headers_req, data=json.dumps(action_dict) )
-            self._check_http_request_response(vim_response)
-            return None
-        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
-            self._format_request_exception(e)
-
-#NOT USED METHODS in current version        
-  
-    def host_vim2gui(self, host, server_dict):
-        '''Transform host dictionary from VIM format to GUI format,
-        and append to the server_dict
-        '''
-        if type(server_dict) is not dict: 
-            print 'vimconnector.host_vim2gui() ERROR, param server_dict must be a dictionary'
-            return
-        RAD={}
-        occupation={}
-        for numa in host['host']['numas']:
-            RAD_item={}
-            occupation_item={}
-            #memory
-            RAD_item['memory']={'size': str(numa['memory'])+'GB', 'eligible': str(numa['hugepages'])+'GB'}
-            occupation_item['memory']= str(numa['hugepages_consumed'])+'GB'
-            #cpus
-            RAD_item['cpus']={}
-            RAD_item['cpus']['cores'] = []
-            RAD_item['cpus']['eligible_cores'] = []
-            occupation_item['cores']=[]
-            for _ in range(0, len(numa['cores']) / 2):
-                RAD_item['cpus']['cores'].append( [] )
-            for core in numa['cores']:
-                RAD_item['cpus']['cores'][core['core_id']].append(core['thread_id'])
-                if not 'status' in core: RAD_item['cpus']['eligible_cores'].append(core['thread_id'])
-                if 'instance_id' in core: occupation_item['cores'].append(core['thread_id'])
-            #ports
-            RAD_item['ports']={}
-            occupation_item['ports']={}
-            for iface in numa['interfaces']:
-                RAD_item['ports'][ iface['pci'] ] = 'speed:'+str(iface['Mbps'])+'M'
-                occupation_item['ports'][ iface['pci'] ] = { 'occupied': str(100*iface['Mbps_consumed'] / iface['Mbps']) + "%" }
-                
-            RAD[ numa['numa_socket'] ] = RAD_item
-            occupation[ numa['numa_socket'] ] = occupation_item
-        server_dict[ host['host']['name'] ] = {'RAD':RAD, 'occupation':occupation}
-
-    def get_hosts_info(self):
-        '''Get the information of deployed hosts
-        Returns the hosts content'''
-    #obtain hosts list
-        url=self.url+'/hosts'
-        try:
-            vim_response = requests.get(url)
-        except requests.exceptions.RequestException as e:
-            print "get_hosts_info Exception: ", e.args
-            return -vimconn.HTTP_Not_Found, str(e.args[0])
-        print "vim get", url, "response:",  vim_response.status_code, vim_response.json()
-        #print vim_response.status_code
-        #print json.dumps(vim_response.json(), indent=4)
-        if vim_response.status_code != 200:
-            #TODO: get error
-            print 'vimconnector.get_hosts_info error getting host list %d %s' %(vim_response.status_code, vim_response.json())
-            return -vim_response.status_code, "Error getting host list"
-        
-        res,hosts = self._format_in(vim_response, get_hosts_response_schema)
-            
-        if res==False:
-            print "vimconnector.get_hosts_info error parsing GET HOSTS vim response", hosts
-            return vimconn.HTTP_Internal_Server_Error, hosts
-    #obtain hosts details
-        hosts_dict={}
-        for host in hosts['hosts']:
-            url=self.url+'/hosts/'+host['id']
-            try:
-                vim_response = requests.get(url)
-            except requests.exceptions.RequestException as e:
-                print "get_hosts_info Exception: ", e.args
-                return -vimconn.HTTP_Not_Found, str(e.args[0])
-            print "vim get", url, "response:",  vim_response.status_code, vim_response.json()
-            if vim_response.status_code != 200:
-                print 'vimconnector.get_hosts_info error getting detailed host %d %s' %(vim_response.status_code, vim_response.json())
-                continue
-            res,host_detail = self._format_in(vim_response, get_host_detail_response_schema)
-            if res==False:
-                print "vimconnector.get_hosts_info error parsing GET HOSTS/%s vim response" % host['id'], host_detail
-                continue
-            #print 'host id '+host['id'], json.dumps(host_detail, indent=4)
-            self.host_vim2gui(host_detail, hosts_dict)
-        return 200, hosts_dict
-
-    def get_hosts(self, vim_tenant):
-        '''Get the hosts and deployed instances
-        Returns the hosts content'''
-    #obtain hosts list
-        url=self.url+'/hosts'
-        try:
-            vim_response = requests.get(url)
-        except requests.exceptions.RequestException as e:
-            print "get_hosts Exception: ", e.args
-            return -vimconn.HTTP_Not_Found, str(e.args[0])
-        print "vim get", url, "response:",  vim_response.status_code, vim_response.json()
-        #print vim_response.status_code
-        #print json.dumps(vim_response.json(), indent=4)
-        if vim_response.status_code != 200:
-            #TODO: get error
-            print 'vimconnector.get_hosts error getting host list %d %s' %(vim_response.status_code, vim_response.json())
-            return -vim_response.status_code, "Error getting host list"
-        
-        res,hosts = self._format_in(vim_response, get_hosts_response_schema)
-            
-        if res==False:
-            print "vimconnector.get_host error parsing GET HOSTS vim response", hosts
-            return vimconn.HTTP_Internal_Server_Error, hosts
-    #obtain instances from hosts
-        for host in hosts['hosts']:
-            url=self.url+'/' + vim_tenant + '/servers?hostId='+host['id']
-            try:
-                vim_response = requests.get(url)
-            except requests.exceptions.RequestException as e:
-                print "get_hosts Exception: ", e.args
-                return -vimconn.HTTP_Not_Found, str(e.args[0])
-            print "vim get", url, "response:",  vim_response.status_code, vim_response.json()
-            if vim_response.status_code != 200:
-                print 'vimconnector.get_hosts error getting instances at host %d %s' %(vim_response.status_code, vim_response.json())
-                continue
-            res,servers = self._format_in(vim_response, get_server_response_schema)
-            if res==False:
-                print "vimconnector.get_host error parsing GET SERVERS/%s vim response" % host['id'], servers
-                continue
-            #print 'host id '+host['id'], json.dumps(host_detail, indent=4)
-            host['instances'] = servers['servers']
-        return 200, hosts['hosts']
-
-    def get_processor_rankings(self):
-        '''Get the processor rankings in the VIM database'''
-        url=self.url+'/processor_ranking'
-        try:
-            vim_response = requests.get(url)
-        except requests.exceptions.RequestException as e:
-            print "get_processor_rankings Exception: ", e.args
-            return -vimconn.HTTP_Not_Found, str(e.args[0])
-        print "vim get", url, "response:", vim_response.status_code, vim_response.json()
-        #print vim_response.status_code
-        #print json.dumps(vim_response.json(), indent=4)
-        if vim_response.status_code != 200:
-            #TODO: get error
-            print 'vimconnector.get_processor_rankings error getting processor rankings %d %s' %(vim_response.status_code, vim_response.json())
-            return -vim_response.status_code, "Error getting processor rankings"
-        
-        res,rankings = self._format_in(vim_response, get_processor_rankings_response_schema)
-        return res, rankings['rankings']
-    
-    def new_host(self, host_data):
-        '''Adds a new host to VIM'''
-        '''Returns status code of the VIM response'''
-        payload_req = host_data
-        try:
-            url = self.url_admin+'/hosts'
-            self.logger.info("Adding a new host POST %s", url)
-            vim_response = requests.post(url, headers = self.headers_req, data=payload_req)
-            self._check_http_request_response(vim_response)
-            self.logger.debug(vim_response.text)
-            #print json.dumps(vim_response.json(), indent=4)
-            response = vim_response.json()
-            js_v(response, new_host_response_schema)
-            r = self._remove_extra_items(response, new_host_response_schema)
-            if r is not None: 
-                self.logger.warn("Warning: remove extra items %s", str(r))
-            host_id = response['host']['id']
-            return host_id
-        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
-            self._format_request_exception(e)
-    
-    def new_external_port(self, port_data):
-        '''Adds a external port to VIM'''
-        '''Returns the port identifier'''
-        #TODO change to logging exception code policies
-        print "VIMConnector: Adding a new external port"
-        payload_req = port_data
-        try:
-            vim_response = requests.post(self.url_admin+'/ports', headers = self.headers_req, data=payload_req)
-        except requests.exceptions.RequestException as e:
-            self.logger.error("new_external_port Exception: ", str(e))
-            return -vimconn.HTTP_Not_Found, str(e.args[0])
-        print vim_response
-        #print vim_response.status_code
-        if vim_response.status_code == 200:
-        #print vim_response.json()
-        #print json.dumps(vim_response.json(), indent=4)
-            res, http_content = self._format_in(vim_response, new_port_response_schema)
-        #print http_content
-            if res:
-                r = self._remove_extra_items(http_content, new_port_response_schema)
-                if r is not None: print "Warning: remove extra items ", r
-                #print http_content
-                port_id = http_content['port']['id']
-                print "Port id: ",port_id
-                return vim_response.status_code,port_id
-            else: return -vimconn.HTTP_Bad_Request,http_content
-        else:
-            #print vim_response.text
-            jsonerror = self._format_jsonerror(vim_response)
-            text = 'Error in VIM "%s": not possible to add new external port. HTTP Response: %d. Error: %s' % (self.url_admin, vim_response.status_code, jsonerror)
-            #print text
-            return -vim_response.status_code,text
-        
-    def new_external_network(self,net_name,net_type):
-        '''Adds a external network to VIM (shared)'''
-        '''Returns the network identifier'''
-        #TODO change to logging exception code policies
-        print "VIMConnector: Adding external shared network to VIM (type " + net_type + "): "+ net_name
-        
-        payload_req = '{"network":{"name": "' + net_name + '","shared":true,"type": "' + net_type + '"}}'
-        try:
-            vim_response = requests.post(self.url+'/networks', headers = self.headers_req, data=payload_req)
-        except requests.exceptions.RequestException as e:
-            self.logger.error( "new_external_network Exception: ", e.args)
-            return -vimconn.HTTP_Not_Found, str(e.args[0])
-        print vim_response
-        #print vim_response.status_code
-        if vim_response.status_code == 200:
-            #print vim_response.json()
-            #print json.dumps(vim_response.json(), indent=4)
-            res,http_content = self._format_in(vim_response, new_network_response_schema)
-            #print http_content
-            if res:
-                r = self._remove_extra_items(http_content, new_network_response_schema)
-                if r is not None: print "Warning: remove extra items ", r
-                #print http_content
-                network_id = http_content['network']['id']
-                print "Network id: ",network_id
-                return vim_response.status_code,network_id
-            else: return -vimconn.HTTP_Bad_Request,http_content
-        else:
-            #print vim_response.text
-            jsonerror = self._format_jsonerror(vim_response)
-            text = 'Error in VIM "%s": not possible to add new external network. HTTP Response: %d. Error: %s' % (self.url, vim_response.status_code, jsonerror)
-            #print text
-            return -vim_response.status_code,text
-        
-    def connect_port_network(self, port_id, network_id, admin=False):
-        '''Connects a external port to a network'''
-        '''Returns status code of the VIM response'''
-        #TODO change to logging exception code policies
-        print "VIMConnector: Connecting external port to network"
-        
-        payload_req = '{"port":{"network_id":"' + network_id + '"}}'
-        if admin:
-            if self.url_admin==None:
-                return -vimconn.HTTP_Unauthorized, "datacenter cannot contain  admin URL"
-            url= self.url_admin
-        else:
-            url= self.url
-        try:
-            vim_response = requests.put(url +'/ports/'+port_id, headers = self.headers_req, data=payload_req)
-        except requests.exceptions.RequestException as e:
-            print "connect_port_network Exception: ", e.args
-            return -vimconn.HTTP_Not_Found, str(e.args[0])
-        print vim_response
-        #print vim_response.status_code
-        if vim_response.status_code == 200:
-            #print vim_response.json()
-            #print json.dumps(vim_response.json(), indent=4)
-            res,http_content = self._format_in(vim_response, new_port_response_schema)
-            #print http_content
-            if res:
-                r = self._remove_extra_items(http_content, new_port_response_schema)
-                if r is not None: print "Warning: remove extra items ", r
-                #print http_content
-                port_id = http_content['port']['id']
-                print "Port id: ",port_id
-                return vim_response.status_code,port_id
-            else: return -vimconn.HTTP_Bad_Request,http_content
-        else:
-            print vim_response.text
-            jsonerror = self._format_jsonerror(vim_response)
-            text = 'Error in VIM "%s": not possible to connect external port to network. HTTP Response: %d. Error: %s' % (self.url_admin, vim_response.status_code, jsonerror)
-            print text
-            return -vim_response.status_code,text
-        
-
diff --git a/osm_ro/vimconn_vmware.py b/osm_ro/vimconn_vmware.py
deleted file mode 100644 (file)
index f343eea..0000000
+++ /dev/null
@@ -1,6618 +0,0 @@
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2016-2017 VMware Inc.
-# This file is part of ETSI OSM
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact:  osslegalrouting@vmware.com
-##
-
-"""
-vimconn_vmware implementation an Abstract class in order to interact with VMware  vCloud Director.
-mbayramov@vmware.com
-"""
-from progressbar import Percentage, Bar, ETA, FileTransferSpeed, ProgressBar
-
-import vimconn
-import os
-import shutil
-import subprocess
-import tempfile
-import traceback
-import itertools
-import requests
-import ssl
-import atexit
-
-from pyVmomi import vim, vmodl
-from pyVim.connect import SmartConnect, Disconnect
-
-from xml.etree import ElementTree as XmlElementTree
-from lxml import etree as lxmlElementTree
-
-import yaml
-from pyvcloud.vcd.client import BasicLoginCredentials,Client,VcdTaskException
-from pyvcloud.vcd.vdc import VDC
-from pyvcloud.vcd.org import Org
-import re
-from pyvcloud.vcd.vapp import VApp
-from xml.sax.saxutils import escape
-import logging
-import json
-import time
-import uuid
-import httplib
-#For python3
-#import http.client
-import hashlib
-import socket
-import struct
-import netaddr
-import random
-
-# global variable for vcd connector type
-STANDALONE = 'standalone'
-
-# key for flavor dicts
-FLAVOR_RAM_KEY = 'ram'
-FLAVOR_VCPUS_KEY = 'vcpus'
-FLAVOR_DISK_KEY = 'disk'
-DEFAULT_IP_PROFILE = {'dhcp_count':50,
-                      'dhcp_enabled':True,
-                      'ip_version':"IPv4"
-                      }
-# global variable for wait time
-INTERVAL_TIME = 5
-MAX_WAIT_TIME = 1800
-
-API_VERSION = '27.0'
-
-__author__ = "Mustafa Bayramov, Arpita Kate, Sachin Bhangare, Prakash Kasar"
-__date__ = "$09-Mar-2018 11:09:29$"
-__version__ = '0.2'
-
-#     -1: "Could not be created",
-#     0: "Unresolved",
-#     1: "Resolved",
-#     2: "Deployed",
-#     3: "Suspended",
-#     4: "Powered on",
-#     5: "Waiting for user input",
-#     6: "Unknown state",
-#     7: "Unrecognized state",
-#     8: "Powered off",
-#     9: "Inconsistent state",
-#     10: "Children do not all have the same status",
-#     11: "Upload initiated, OVF descriptor pending",
-#     12: "Upload initiated, copying contents",
-#     13: "Upload initiated , disk contents pending",
-#     14: "Upload has been quarantined",
-#     15: "Upload quarantine period has expired"
-
-# mapping vCD status to MANO
-vcdStatusCode2manoFormat = {4: 'ACTIVE',
-                            7: 'PAUSED',
-                            3: 'SUSPENDED',
-                            8: 'INACTIVE',
-                            12: 'BUILD',
-                            -1: 'ERROR',
-                            14: 'DELETED'}
-
-#
-netStatus2manoFormat = {'ACTIVE': 'ACTIVE', 'PAUSED': 'PAUSED', 'INACTIVE': 'INACTIVE', 'BUILD': 'BUILD',
-                        'ERROR': 'ERROR', 'DELETED': 'DELETED'
-                        }
-
-class vimconnector(vimconn.vimconnector):
-    # dict used to store flavor in memory
-    flavorlist = {}
-
-    def __init__(self, uuid=None, name=None, tenant_id=None, tenant_name=None,
-                 url=None, url_admin=None, user=None, passwd=None, log_level=None, config={}, persistent_info={}):
-        """
-        Constructor create vmware connector to vCloud director.
-
-        By default construct doesn't validate connection state. So client can create object with None arguments.
-        If client specified username , password and host and VDC name.  Connector initialize other missing attributes.
-
-        a) It initialize organization UUID
-        b) Initialize tenant_id/vdc ID.   (This information derived from tenant name)
-
-        Args:
-            uuid - is organization uuid.
-            name - is organization name that must be presented in vCloud director.
-            tenant_id - is VDC uuid it must be presented in vCloud director
-            tenant_name - is VDC name.
-            url - is hostname or ip address of vCloud director
-            url_admin - same as above.
-            user - is user that administrator for organization. Caller must make sure that
-                    username has right privileges.
-
-            password - is password for a user.
-
-            VMware connector also requires PVDC administrative privileges and separate account.
-            This variables must be passed via config argument dict contains keys
-
-            dict['admin_username']
-            dict['admin_password']
-            config - Provide NSX and vCenter information
-
-            Returns:
-                Nothing.
-        """
-
-        vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url,
-                                      url_admin, user, passwd, log_level, config)
-
-        self.logger = logging.getLogger('openmano.vim.vmware')
-        self.logger.setLevel(10)
-        self.persistent_info = persistent_info
-
-        self.name = name
-        self.id = uuid
-        self.url = url
-        self.url_admin = url_admin
-        self.tenant_id = tenant_id
-        self.tenant_name = tenant_name
-        self.user = user
-        self.passwd = passwd
-        self.config = config
-        self.admin_password = None
-        self.admin_user = None
-        self.org_name = ""
-        self.nsx_manager = None
-        self.nsx_user = None
-        self.nsx_password = None
-        self.availability_zone = None
-
-        # Disable warnings from self-signed certificates.
-        requests.packages.urllib3.disable_warnings()
-
-        if tenant_name is not None:
-            orgnameandtenant = tenant_name.split(":")
-            if len(orgnameandtenant) == 2:
-                self.tenant_name = orgnameandtenant[1]
-                self.org_name = orgnameandtenant[0]
-            else:
-                self.tenant_name = tenant_name
-        if "orgname" in config:
-            self.org_name = config['orgname']
-
-        if log_level:
-            self.logger.setLevel(getattr(logging, log_level))
-
-        try:
-            self.admin_user = config['admin_username']
-            self.admin_password = config['admin_password']
-        except KeyError:
-            raise vimconn.vimconnException(message="Error admin username or admin password is empty.")
-
-        try:
-            self.nsx_manager = config['nsx_manager']
-            self.nsx_user = config['nsx_user']
-            self.nsx_password = config['nsx_password']
-        except KeyError:
-            raise vimconn.vimconnException(message="Error: nsx manager or nsx user or nsx password is empty in Config")
-
-        self.vcenter_ip = config.get("vcenter_ip", None)
-        self.vcenter_port = config.get("vcenter_port", None)
-        self.vcenter_user = config.get("vcenter_user", None)
-        self.vcenter_password = config.get("vcenter_password", None)
-
-        #Set availability zone for Affinity rules
-        self.availability_zone = self.set_availability_zones()
-
-# ############# Stub code for SRIOV #################
-#         try:
-#             self.dvs_name = config['dv_switch_name']
-#         except KeyError:
-#             raise vimconn.vimconnException(message="Error: distributed virtaul switch name is empty in Config")
-#
-#         self.vlanID_range = config.get("vlanID_range", None)
-
-        self.org_uuid = None
-        self.client = None
-
-        if not url:
-            raise vimconn.vimconnException('url param can not be NoneType')
-
-        if not self.url_admin:  # try to use normal url
-            self.url_admin = self.url
-
-        logging.debug("UUID: {} name: {} tenant_id: {} tenant name {}".format(self.id, self.org_name,
-                                                                              self.tenant_id, self.tenant_name))
-        logging.debug("vcd url {} vcd username: {} vcd password: {}".format(self.url, self.user, self.passwd))
-        logging.debug("vcd admin username {} vcd admin passowrd {}".format(self.admin_user, self.admin_password))
-
-        # initialize organization
-        if self.user is not None and self.passwd is not None and self.url:
-            self.init_organization()
-
-    def __getitem__(self, index):
-        if index == 'name':
-            return self.name
-        if index == 'tenant_id':
-            return self.tenant_id
-        if index == 'tenant_name':
-            return self.tenant_name
-        elif index == 'id':
-            return self.id
-        elif index == 'org_name':
-            return self.org_name
-        elif index == 'org_uuid':
-            return self.org_uuid
-        elif index == 'user':
-            return self.user
-        elif index == 'passwd':
-            return self.passwd
-        elif index == 'url':
-            return self.url
-        elif index == 'url_admin':
-            return self.url_admin
-        elif index == "config":
-            return self.config
-        else:
-            raise KeyError("Invalid key '%s'" % str(index))
-
-    def __setitem__(self, index, value):
-        if index == 'name':
-            self.name = value
-        if index == 'tenant_id':
-            self.tenant_id = value
-        if index == 'tenant_name':
-            self.tenant_name = value
-        elif index == 'id':
-            self.id = value
-        elif index == 'org_name':
-            self.org_name = value
-        elif index == 'org_uuid':
-            self.org_uuid = value
-        elif index == 'user':
-            self.user = value
-        elif index == 'passwd':
-            self.passwd = value
-        elif index == 'url':
-            self.url = value
-        elif index == 'url_admin':
-            self.url_admin = value
-        else:
-            raise KeyError("Invalid key '%s'" % str(index))
-
-    def connect_as_admin(self):
-        """ Method connect as pvdc admin user to vCloud director.
-            There are certain action that can be done only by provider vdc admin user.
-            Organization creation / provider network creation etc.
-
-            Returns:
-                The return client object that latter can be used to connect to vcloud director as admin for provider vdc
-        """
-        self.logger.debug("Logging into vCD {} as admin.".format(self.org_name))
-
-        try:
-            host = self.url
-            org = 'System'
-            client_as_admin = Client(host, verify_ssl_certs=False)
-            client_as_admin.set_highest_supported_version()
-            client_as_admin.set_credentials(BasicLoginCredentials(self.admin_user, org, self.admin_password))
-        except Exception as e:
-            raise vimconn.vimconnException(
-                  "Can't connect to a vCloud director as: {} with exception {}".format(self.admin_user, e))
-
-        return client_as_admin
-
-    def connect(self):
-        """ Method connect as normal user to vCloud director.
-
-            Returns:
-                The return client object that latter can be used to connect to vCloud director as admin for VDC
-        """
-        try:
-            self.logger.debug("Logging into vCD {} as {} to datacenter {}.".format(self.org_name,
-                                                                                      self.user,
-                                                                                      self.org_name))
-            host = self.url
-            client = Client(host, verify_ssl_certs=False)
-            client.set_highest_supported_version()
-            client.set_credentials(BasicLoginCredentials(self.user, self.org_name, self.passwd))
-        except:
-            raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
-                                                     "{} as user: {}".format(self.org_name, self.user))
-
-        return client
-
-    def init_organization(self):
-        """ Method initialize organization UUID and VDC parameters.
-
-            At bare minimum client must provide organization name that present in vCloud director and VDC.
-
-            The VDC - UUID ( tenant_id) will be initialized at the run time if client didn't call constructor.
-            The Org - UUID will be initialized at the run time if data center present in vCloud director.
-
-            Returns:
-                The return vca object that letter can be used to connect to vcloud direct as admin
-        """
-        client = self.connect()
-        if not client:
-            raise vimconn.vimconnConnectionException("Failed to connect vCD.")
-
-        self.client = client
-        try:
-            if self.org_uuid is None:
-                org_list = client.get_org_list()
-                for org in org_list.Org:
-                    # we set org UUID at the init phase but we can do it only when we have valid credential.
-                    if org.get('name') == self.org_name:
-                        self.org_uuid = org.get('href').split('/')[-1]
-                        self.logger.debug("Setting organization UUID {}".format(self.org_uuid))
-                        break
-                else:
-                    raise vimconn.vimconnException("Vcloud director organization {} not found".format(self.org_name))
-
-                # if well good we require for org details
-                org_details_dict = self.get_org(org_uuid=self.org_uuid)
-
-                # we have two case if we want to initialize VDC ID or VDC name at run time
-                # tenant_name provided but no tenant id
-                if self.tenant_id is None and self.tenant_name is not None and 'vdcs' in org_details_dict:
-                    vdcs_dict = org_details_dict['vdcs']
-                    for vdc in vdcs_dict:
-                        if vdcs_dict[vdc] == self.tenant_name:
-                            self.tenant_id = vdc
-                            self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
-                                                                                                    self.org_name))
-                            break
-                    else:
-                        raise vimconn.vimconnException("Tenant name indicated but not present in vcloud director.")
-                    # case two we have tenant_id but we don't have tenant name so we find and set it.
-                    if self.tenant_id is not None and self.tenant_name is None and 'vdcs' in org_details_dict:
-                        vdcs_dict = org_details_dict['vdcs']
-                        for vdc in vdcs_dict:
-                            if vdc == self.tenant_id:
-                                self.tenant_name = vdcs_dict[vdc]
-                                self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
-                                                                                                        self.org_name))
-                                break
-                        else:
-                            raise vimconn.vimconnException("Tenant id indicated but not present in vcloud director")
-            self.logger.debug("Setting organization uuid {}".format(self.org_uuid))
-        except:
-            self.logger.debug("Failed initialize organization UUID for org {}".format(self.org_name))
-            self.logger.debug(traceback.format_exc())
-            self.org_uuid = None
-
-    def new_tenant(self, tenant_name=None, tenant_description=None):
-        """ Method adds a new tenant to VIM with this name.
-            This action requires access to create VDC action in vCloud director.
-
-            Args:
-                tenant_name is tenant_name to be created.
-                tenant_description not used for this call
-
-            Return:
-                returns the tenant identifier in UUID format.
-                If action is failed method will throw vimconn.vimconnException method
-            """
-        vdc_task = self.create_vdc(vdc_name=tenant_name)
-        if vdc_task is not None:
-            vdc_uuid, value = vdc_task.popitem()
-            self.logger.info("Created new vdc {} and uuid: {}".format(tenant_name, vdc_uuid))
-            return vdc_uuid
-        else:
-            raise vimconn.vimconnException("Failed create tenant {}".format(tenant_name))
-
-    def delete_tenant(self, tenant_id=None):
-        """ Delete a tenant from VIM
-             Args:
-                tenant_id is tenant_id to be deleted.
-
-            Return:
-                returns the tenant identifier in UUID format.
-                If action is failed method will throw exception
-        """
-        vca = self.connect_as_admin()
-        if not vca:
-            raise vimconn.vimconnConnectionException("Failed to connect vCD")
-
-        if tenant_id is not None:
-            if vca._session:
-                #Get OrgVDC
-                url_list = [self.url, '/api/vdc/', tenant_id]
-                orgvdc_herf = ''.join(url_list)
-
-                headers = {'Accept':'application/*+xml;version=' + API_VERSION,
-                           'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
-                response = self.perform_request(req_type='GET',
-                                                url=orgvdc_herf,
-                                                headers=headers)
-
-                if response.status_code != requests.codes.ok:
-                    self.logger.debug("delete_tenant():GET REST API call {} failed. "\
-                                      "Return status code {}".format(orgvdc_herf,
-                                                                     response.status_code))
-                    raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))
-
-                lxmlroot_respond = lxmlElementTree.fromstring(response.content)
-                namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
-                #For python3
-                #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
-                namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
-                vdc_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']
-                vdc_remove_href = vdc_remove_href + '?recursive=true&force=true'
-
-                response = self.perform_request(req_type='DELETE',
-                                                url=vdc_remove_href,
-                                                headers=headers)
-
-                if response.status_code == 202:
-                    time.sleep(5)
-                    return tenant_id
-                else:
-                    self.logger.debug("delete_tenant(): DELETE REST API call {} failed. "\
-                                      "Return status code {}".format(vdc_remove_href,
-                                                                     response.status_code))
-                    raise vimconn.vimconnException("Fail to delete tenant with ID {}".format(tenant_id))
-        else:
-            self.logger.debug("delete_tenant():Incorrect tenant ID  {}".format(tenant_id))
-            raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))
-
-
-    def get_tenant_list(self, filter_dict={}):
-        """Obtain tenants of VIM
-        filter_dict can contain the following keys:
-            name: filter by tenant name
-            id: filter by tenant uuid/id
-            <other VIM specific>
-        Returns the tenant list of dictionaries:
-            [{'name':'<name>, 'id':'<id>, ...}, ...]
-
-        """
-        org_dict = self.get_org(self.org_uuid)
-        vdcs_dict = org_dict['vdcs']
-
-        vdclist = []
-        try:
-            for k in vdcs_dict:
-                entry = {'name': vdcs_dict[k], 'id': k}
-                # if caller didn't specify dictionary we return all tenants.
-                if filter_dict is not None and filter_dict:
-                    filtered_entry = entry.copy()
-                    filtered_dict = set(entry.keys()) - set(filter_dict)
-                    for unwanted_key in filtered_dict: del entry[unwanted_key]
-                    if filter_dict == entry:
-                        vdclist.append(filtered_entry)
-                else:
-                    vdclist.append(entry)
-        except:
-            self.logger.debug("Error in get_tenant_list()")
-            self.logger.debug(traceback.format_exc())
-            raise vimconn.vimconnException("Incorrect state. {}")
-
-        return vdclist
-
-    def new_network(self, net_name, net_type, ip_profile=None, shared=False, vlan=None):
-        """Adds a tenant network to VIM
-        Params:
-            'net_name': name of the network
-            'net_type': one of:
-                'bridge': overlay isolated network
-                'data':   underlay E-LAN network for Passthrough and SRIOV interfaces
-                'ptp':    underlay E-LINE network for Passthrough and SRIOV interfaces.
-            'ip_profile': is a dict containing the IP parameters of the network
-                'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
-                'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
-                'gateway_address': (Optional) ip_schema, that is X.X.X.X
-                'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
-                'dhcp_enabled': True or False
-                'dhcp_start_address': ip_schema, first IP to grant
-                'dhcp_count': number of IPs to grant.
-            'shared': if this network can be seen/use by other tenants/organization
-            'vlan': in case of a data or ptp net_type, the intended vlan tag to be used for the network
-        Returns a tuple with the network identifier and created_items, or raises an exception on error
-            created_items can be None or a dictionary where this method can include key-values that will be passed to
-            the method delete_network. Can be used to store created segments, created l2gw connections, etc.
-            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
-            as not present.
-        """
-
-        self.logger.debug("new_network tenant {} net_type {} ip_profile {} shared {}"
-                          .format(net_name, net_type, ip_profile, shared))
-
-        created_items = {}
-        isshared = 'false'
-        if shared:
-            isshared = 'true'
-
-# ############# Stub code for SRIOV #################
-#         if net_type == "data" or net_type == "ptp":
-#             if self.config.get('dv_switch_name') == None:
-#                  raise vimconn.vimconnConflictException("You must provide 'dv_switch_name' at config value")
-#             network_uuid = self.create_dvPort_group(net_name)
-
-        network_uuid = self.create_network(network_name=net_name, net_type=net_type,
-                                           ip_profile=ip_profile, isshared=isshared)
-        if network_uuid is not None:
-            return network_uuid, created_items
-        else:
-            raise vimconn.vimconnUnexpectedResponse("Failed create a new network {}".format(net_name))
-
def get_vcd_network_list(self):
    """Return the list of networks available in the tenant VDC.

    Fetches the VDC document over the vCD REST API, then each network href
    found under its 'AvailableNetworks' section, and builds one dictionary
    per network with keys: name, id, shared, tenant_id, admin_state_up,
    status and type.

    Returns:
        list of network dictionaries; may be empty.
    Raises:
        vimconn.vimconnConnectionException: empty tenant name, missing VDC
            or no authenticated vCD session.
        vimconn.vimconnNotFoundException: the VDC content cannot be fetched.
    """
    self.logger.debug("get_vcd_network_list(): retrieving network list for vcd {}".format(self.tenant_name))

    if not self.tenant_name:
        raise vimconn.vimconnConnectionException("Tenant name is empty.")

    org, vdc = self.get_vdc_details()
    if vdc is None:
        raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}".format(self.tenant_name))

    vdc_uuid = vdc.get('id').split(":")[3]
    if not self.client._session:
        # previously a missing session left 'headers'/'response' unbound and
        # the method died with a NameError; fail explicitly instead
        raise vimconn.vimconnConnectionException("Failed to get a session to vCD")
    headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
               'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
    response = self.perform_request(req_type='GET', url=vdc.get('href'), headers=headers)
    if response.status_code != 200:
        self.logger.error("Failed to get vdc content")
        raise vimconn.vimconnNotFoundException("Failed to get vdc content")
    content = XmlElementTree.fromstring(response.content)

    network_list = []
    try:
        for item in content:
            if item.tag.split('}')[-1] != 'AvailableNetworks':
                continue
            for net in item:
                response = self.perform_request(req_type='GET', url=net.get('href'), headers=headers)
                if response.status_code != 200:
                    self.logger.error("Failed to get network content")
                    raise vimconn.vimconnNotFoundException("Failed to get network content")
                net_details = XmlElementTree.fromstring(response.content)

                net_uuid = net_details.get('id').split(":")
                # skip ids that are not in the 'urn:vcloud:network:<uuid>' form
                if len(net_uuid) != 4:
                    continue
                net_uuid = net_uuid[3]
                self.logger.debug("get_vcd_network_list(): Adding network {} "
                                  "to a list vcd id {} network {}".format(net_uuid, vdc_uuid,
                                                                          net_details.get('name')))
                shared = [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true'
                filter_dict = {
                    "name": net_details.get('name'),
                    "id": net_uuid,
                    "shared": shared,
                    "tenant_id": vdc_uuid,
                    # vCD status '1' means the network is enabled
                    "admin_state_up": int(net_details.get('status')) == 1,
                    "status": "ACTIVE",
                    "type": "bridge",
                }
                network_list.append(filter_dict)
                self.logger.debug("get_vcd_network_list adding entry {}".format(filter_dict))
    except Exception:
        # best effort: keep whatever was collected, but never a bare 'except:'
        self.logger.debug("Error in get_vcd_network_list", exc_info=True)

    self.logger.debug("get_vcd_network_list returning {}".format(network_list))
    return network_list
-
def get_network_list(self, filter_dict=None):
    """Obtain tenant networks of VIM
    Filter_dict can be:
        name: network name  OR/AND
        id: network uuid    OR/AND
        shared: boolean     OR/AND
        tenant_id: tenant   OR/AND
        admin_state_up: boolean
        status: 'ACTIVE'

    Returns the network list of dictionaries:
        [{<the fields at Filter_dict plus some VIM specific>}, ...]
        List can be empty
    Raises:
        vimconn.vimconnConnectionException: empty tenant name, missing VDC
            or no authenticated vCD session.
        vimconn.vimconnNotFoundException: VDC or network content not found.
    """
    self.logger.debug("get_network_list(): retrieving network list for vcd {}".format(self.tenant_name))

    if not self.tenant_name:
        raise vimconn.vimconnConnectionException("Tenant name is empty.")

    org, vdc = self.get_vdc_details()
    if vdc is None:
        raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}.".format(self.tenant_name))

    try:
        vdcid = vdc.get('id').split(":")[3]

        if not self.client._session:
            # previously a missing session left 'response' unbound (NameError)
            raise vimconn.vimconnConnectionException("Failed to get a session to vCD")
        headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
                   'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
        response = self.perform_request(req_type='GET', url=vdc.get('href'), headers=headers)
        if response.status_code != 200:
            self.logger.error("Failed to get vdc content")
            raise vimconn.vimconnNotFoundException("Failed to get vdc content")
        content = XmlElementTree.fromstring(response.content)

        network_list = []
        for item in content:
            if item.tag.split('}')[-1] != 'AvailableNetworks':
                continue
            for net in item:
                response = self.perform_request(req_type='GET', url=net.get('href'), headers=headers)
                if response.status_code != 200:
                    self.logger.error("Failed to get network content")
                    raise vimconn.vimconnNotFoundException("Failed to get network content")
                net_details = XmlElementTree.fromstring(response.content)

                net_uuid = net_details.get('id').split(":")
                # skip ids that are not in the 'urn:vcloud:network:<uuid>' form
                if len(net_uuid) != 4:
                    continue
                net_uuid = net_uuid[3]
                self.logger.debug("get_network_list(): Adding net {}"
                                  " to a list vcd id {} network {}".format(net_uuid, vdcid,
                                                                           net_details.get('name')))
                shared = [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true'
                filter_entry = {
                    "name": net_details.get('name'),
                    "id": net_uuid,
                    "shared": shared,
                    "tenant_id": vdcid,
                    # vCD status '1' means the network is enabled
                    "admin_state_up": int(net_details.get('status')) == 1,
                    "status": "ACTIVE",
                    "type": "bridge",
                }
                # keep the entry only when every requested filter key matches
                if not filter_dict or all(k in filter_entry and filter_entry[k] == v
                                          for k, v in filter_dict.items()):
                    network_list.append(filter_entry)
    except Exception as e:
        self.logger.debug("Error in get_network_list", exc_info=True)
        if isinstance(e, vimconn.vimconnException):
            raise
        raise vimconn.vimconnNotFoundException("Failed : Networks list not found {} ".format(e))

    self.logger.debug("Returning {}".format(network_list))
    return network_list
-
def get_network(self, net_id):
    """Method obtains network details of net_id VIM network
       Return a dict with  the fields at filter_dict (see get_network_list) plus some VIM specific>}, ...]"""

    try:
        org, vdc = self.get_vdc_details()
        vdc_id = vdc.get('id').split(":")[3]
        # the session headers carry the vCD authorization token for the REST calls
        if self.client._session:
            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
                       'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
            response = self.perform_request(req_type='GET',
                                       url=vdc.get('href'),
                                           headers=headers)
        # NOTE(review): if there is no session, 'headers' and 'response' are never
        # bound and the next line raises NameError — confirm a session always
        # exists when this method is reached.
        if response.status_code != 200:
            self.logger.error("Failed to get vdc content")
            raise vimconn.vimconnNotFoundException("Failed to get vdc content")
        else:
            content = XmlElementTree.fromstring(response.content)

        filter_dict = {}

        for item in content:
            if item.tag.split('}')[-1] == 'AvailableNetworks':
                for net in item:
                    # fetch the details of each advertised network until net_id matches
                    response = self.perform_request(req_type='GET',
                                               url=net.get('href'),
                                                   headers=headers)

                    if response.status_code != 200:
                        self.logger.error("Failed to get network content")
                        raise vimconn.vimconnNotFoundException("Failed to get network content")
                    else:
                        net_details = XmlElementTree.fromstring(response.content)

                        # ids look like 'urn:vcloud:network:<uuid>'; match on the uuid part
                        vdc_network_id = net_details.get('id').split(":")
                        if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
                            filter_dict["name"] = net_details.get('name')
                            filter_dict["id"] = vdc_network_id[3]
                            if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
                                shared = True
                            else:
                                shared = False
                            filter_dict["shared"] = shared
                            filter_dict["tenant_id"] = vdc_id
                            # vCD status '1' means the network is enabled
                            if int(net_details.get('status')) == 1:
                                filter_dict["admin_state_up"] = True
                            else:
                                filter_dict["admin_state_up"] = False
                            filter_dict["status"] = "ACTIVE"
                            filter_dict["type"] = "bridge"
                            self.logger.debug("Returning {}".format(filter_dict))
                            return filter_dict
                else:
                    # for/else: this branch belongs to the inner 'for net' loop and,
                    # because the loop never executes 'break', it runs whenever the
                    # loop finishes without returning — i.e. no network matched net_id.
                    raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
    except Exception as e:
        self.logger.debug("Error in get_network")
        self.logger.debug(traceback.format_exc())
        if isinstance(e, vimconn.vimconnException):
            raise
        else:
            raise vimconn.vimconnNotFoundException("Failed : Network not found {} ".format(e))

    # only reached when the VDC content has no 'AvailableNetworks' section at all;
    # in that case the (empty) dict is returned rather than raising
    return filter_dict
-
def delete_network(self, net_id, created_items=None):
    """
    Removes a tenant network from VIM and its associated elements
    :param net_id: VIM identifier of the network, provided by method new_network
    :param created_items: dictionary with extra items to be deleted. provided by method new_network
    Returns the network identifier or raises an exception upon error or when network is not found
    """

    # ############# Stub code for SRIOV #################
#         dvport_group = self.get_dvport_group(net_id)
#         if dvport_group:
#             #delete portgroup
#             status = self.destroy_dvport_group(net_id)
#             if status:
#                 # Remove vlanID from persistent info
#                 if net_id in self.persistent_info["used_vlanIDs"]:
#                     del self.persistent_info["used_vlanIDs"][net_id]
#
#                 return net_id

    vcd_network = self.get_vcd_network(network_uuid=net_id)
    if not vcd_network:
        raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
    if self.delete_network_action(network_uuid=net_id):
        return net_id
    # previously a failed delete fell through and returned None, breaking the
    # documented contract ("returns the identifier or raises")
    raise vimconn.vimconnException("Failed to delete network {}".format(net_id))
-
def refresh_nets_status(self, net_list):
    """Get the status of the networks
       Params: the list of network identifiers
       Returns a dictionary with:
            net_id:         #VIM id of this network
                status:     #Mandatory. Text with one of:
                            #  DELETED (not found at vim)
                            #  ACTIVE, DOWN (admin down)
                error_msg:  #Text with VIM error message, if any
                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
    """
    dict_entry = {}
    try:
        for net in net_list:
            errormsg = ''
            vcd_network = self.get_vcd_network(network_uuid=net)
            if vcd_network:
                # vCD reports status '1' for an enabled/active network
                status = 'ACTIVE' if vcd_network['status'] == '1' else 'DOWN'
            else:
                status = 'DELETED'
                errormsg = 'Network not found.'

            dict_entry[net] = {'status': status, 'error_msg': errormsg,
                               'vim_info': yaml.safe_dump(vcd_network)}
    except Exception:
        # best effort: return what was collected so far, but never a bare 'except:'
        self.logger.debug("Error in refresh_nets_status", exc_info=True)

    return dict_entry
-
def get_flavor(self, flavor_id):
    """Obtain flavor details from the VIM.

    Looks the flavor up in the class-level in-memory registry.
    Returns the flavor dict {'id':<>, 'name':<>, ...} or raises
    vimconn.vimconnNotFoundException when the id is unknown.
    """
    if flavor_id in vimconnector.flavorlist:
        return vimconnector.flavorlist[flavor_id]
    raise vimconn.vimconnNotFoundException("Flavor not found.")
-
def new_flavor(self, flavor_data):
    """Adds a tenant flavor to VIM
        flavor_data contains a dictionary with information, keys:
            name: flavor name
            ram: memory (cloud type) in MBytes
            vpcus: cpus (cloud type)
            extended: EPA parameters
              - numas: #items requested in same NUMA
                    memory: number of 1G huge pages memory
                    paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads
                    interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
                      - name: interface name
                        dedicated: yes|no|yes:sriov;  for PT, SRIOV or only one SRIOV for the physical NIC
                        bandwidth: X Gbps; requested guarantee bandwidth
                        vpci: requested virtual PCI address
            disk: disk size
            is_public:
    Returns the flavor identifier (a new uuid as string)
    Raises vimconn.vimconnException when ram/cpu/disk are not integers."""

    self.logger.debug("Creating new flavor - flavor_data: {}".format(flavor_data))
    # work on a shallow copy so the caller's dict is not silently mutated
    # when the ram/vcpus/disk keys are overwritten below
    new_flavor = dict(flavor_data)
    ram = flavor_data.get(FLAVOR_RAM_KEY, 1024)
    cpu = flavor_data.get(FLAVOR_VCPUS_KEY, 1)
    disk = flavor_data.get(FLAVOR_DISK_KEY, 0)

    if not isinstance(ram, int):
        raise vimconn.vimconnException("Non-integer value for ram")
    elif not isinstance(cpu, int):
        raise vimconn.vimconnException("Non-integer value for cpu")
    elif not isinstance(disk, int):
        raise vimconn.vimconnException("Non-integer value for disk")

    extended_flv = flavor_data.get("extended")
    if extended_flv:
        numas = extended_flv.get("numas")
        if numas:
            for numa in numas:
                # EPA numa requests overwrite the plain ram/vcpus values
                if 'memory' in numa:
                    ram = numa['memory'] * 1024  # numa memory is given in GB
                if 'paired-threads' in numa:
                    cpu = numa['paired-threads'] * 2
                elif 'cores' in numa:
                    cpu = numa['cores']
                elif 'threads' in numa:
                    cpu = numa['threads']

    new_flavor[FLAVOR_RAM_KEY] = ram
    new_flavor[FLAVOR_VCPUS_KEY] = cpu
    new_flavor[FLAVOR_DISK_KEY] = disk
    # flavors are tracked only in an in-memory, class-level registry
    flavor_id = uuid.uuid4()
    vimconnector.flavorlist[str(flavor_id)] = new_flavor
    self.logger.debug("Created flavor - {} : {}".format(flavor_id, new_flavor))

    return str(flavor_id)
-
def delete_flavor(self, flavor_id):
    """Deletes a tenant flavor from VIM identified by its id.

    Returns the deleted id, or raises vimconn.vimconnNotFoundException
    when the flavor is not present in the registry.
    """
    try:
        del vimconnector.flavorlist[flavor_id]
    except KeyError:
        raise vimconn.vimconnNotFoundException("Flavor not found.")
    return flavor_id
-
def new_image(self, image_dict):
    """
    Adds a tenant image to VIM.

    Delegates entirely to get_image_id_from_path using the 'location'
    entry of image_dict; returns whatever that call returns.
    """
    image_path = image_dict['location']
    return self.get_image_id_from_path(image_path)
-
def delete_image(self, image_id):
    """
        Deletes a tenant image from VIM
        Args:
            image_id is ID of Image to be deleted
        Return:
            returns the image identifier in UUID format or raises an exception on error
    """
    conn = self.connect_as_admin()
    if not conn:
        raise vimconn.vimconnConnectionException("Failed to connect vCD")
    # Get Catalog details
    catalog_href = ''.join([self.url, '/api/catalog/', image_id])

    headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
               'x-vcloud-authorization': conn._session.headers['x-vcloud-authorization']}

    response = self.perform_request(req_type='GET',
                                    url=catalog_href,
                                    headers=headers)

    if response.status_code != requests.codes.ok:
        self.logger.debug("delete_image():GET REST API call {} failed. "
                          "Return status code {}".format(catalog_href, response.status_code))
        raise vimconn.vimconnNotFoundException("Fail to get image {}".format(image_id))

    lxmlroot_respond = lxmlElementTree.fromstring(response.content)
    # python3 fix: dict.iteritems() no longer exists, use items()
    namespaces = {prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix}
    namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"

    catalogItems_section = lxmlroot_respond.find("xmlns:CatalogItems", namespaces)
    catalogItems = catalogItems_section.iterfind("xmlns:CatalogItem", namespaces)
    for catalogItem in catalogItems:
        catalogItem_href = catalogItem.attrib['href']

        response = self.perform_request(req_type='GET',
                                        url=catalogItem_href,
                                        headers=headers)

        if response.status_code != requests.codes.ok:
            # log the item href actually queried (previously logged the catalog href)
            self.logger.debug("delete_image():GET REST API call {} failed. "
                              "Return status code {}".format(catalogItem_href, response.status_code))
            raise vimconn.vimconnNotFoundException("Fail to get catalogItem {} for catalog {}".format(
                catalogItem, image_id))

        lxmlroot_respond = lxmlElementTree.fromstring(response.content)
        namespaces = {prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix}
        namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
        catalogitem_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']", namespaces).attrib['href']

        # Remove the catalog item; all items must go before the catalog itself
        response = self.perform_request(req_type='DELETE',
                                        url=catalogitem_remove_href,
                                        headers=headers)
        if response.status_code == requests.codes.no_content:
            self.logger.debug("Deleted Catalog item {}".format(catalogItem))
        else:
            raise vimconn.vimconnException("Fail to delete Catalog Item {}".format(catalogItem))

    # Remove catalog
    catalog_remove_href = ''.join([self.url, '/api/admin/catalog/', image_id])
    response = self.perform_request(req_type='DELETE',
                                    url=catalog_remove_href,
                                    headers=headers)

    if response.status_code == requests.codes.no_content:
        self.logger.debug("Deleted Catalog {}".format(image_id))
        return image_id
    raise vimconn.vimconnException("Fail to delete Catalog {}".format(image_id))
-
-
def catalog_exists(self, catalog_name, catalogs):
    """Return the id of the first catalog whose name equals catalog_name.

    :param catalog_name: name to look for
    :param catalogs: iterable of dicts with at least 'name' and 'id' keys
    :return: the matching catalog id, or None when no catalog matches
    """
    matching_ids = (entry['id'] for entry in catalogs if entry['name'] == catalog_name)
    return next(matching_ids, None)
-
def create_vimcatalog(self, vca=None, catalog_name=None):
    """Create a new catalog entry in vCloud director.

    Args:
        vca: vCloud director client.
        catalog_name: catalog that the client wishes to create. Note no
            validation is done on the name; the caller must provide a
            valid string representation.

    Returns the catalog id if the catalog was created, else None.
    """
    try:
        created = vca.create_catalog(catalog_name, catalog_name)
        if created:
            # the id attribute looks like 'urn:vcloud:catalog:<uuid>';
            # keep only the trailing uuid part
            return created.get('id').split(':')[-1]
        catalogs = vca.list_catalogs()
    except Exception as err:
        self.logger.error(
            'create_vimcatalog(): Creation of catalog "{}" failed with error: {}'.format(catalog_name, err))
        raise
    # creation returned nothing: fall back to looking the name up
    return self.catalog_exists(catalog_name, catalogs)
-
# noinspection PyIncorrectDocstring
def upload_ovf(self, vca=None, catalog_name=None, image_name=None, media_file_name=None,
               description='', progress=False, chunk_bytes=128 * 1024):
    """
    Uploads a OVF file to a vCloud catalog

    :param chunk_bytes: size of each chunk streamed when uploading the VMDK payload
    :param progress: when True, render a console progress bar during the VMDK upload
    :param description: free text embedded in the vApp template description
    :param image_name: accepted but not referenced inside this method
    :param vca: vCloud director client whose catalog list is scanned
    :param catalog_name: (str): The name of the catalog to upload the media.
    :param media_file_name: (str): The name of the local media file to upload.
    :return: (bool) True if the media file was successfully uploaded, false otherwise.
    """
    # NOTE(review): the isfile() result is discarded; a missing file only
    # surfaces later at os.stat / open — confirm whether that is intended.
    os.path.isfile(media_file_name)
    statinfo = os.stat(media_file_name)

    #  find a catalog entry where we upload OVF.
    #  create vApp Template and check the status if vCD able to read OVF it will respond with appropirate
    #  status change.
    #  if VCD can parse OVF we upload VMDK file
    try:
        for catalog in vca.list_catalogs():
            if catalog_name != catalog['name']:
                continue
            catalog_href = "{}/api/catalog/{}/action/upload".format(self.url, catalog['id'])
            data = """
            <UploadVAppTemplateParams name="{}" xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"><Description>{} vApp Template</Description></UploadVAppTemplateParams>
            """.format(catalog_name, description)

            if self.client:
                headers = {'Accept':'application/*+xml;version=' + API_VERSION,
                       'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
                headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml'

            # ask vCD to create an (empty) vApp template shell for the upload
            response = self.perform_request(req_type='POST',
                                            url=catalog_href,
                                            headers=headers,
                                            data=data)

            if response.status_code == requests.codes.created:
                catalogItem = XmlElementTree.fromstring(response.content)
                entity = [child for child in catalogItem if
                          child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
                href = entity.get('href')
                template = href

                response = self.perform_request(req_type='GET',
                                                url=href,
                                                headers=headers)

                if response.status_code == requests.codes.ok:
                    headers['Content-Type'] = 'Content-Type text/xml'
                    # NOTE(review): under python3 response.content is bytes; a str
                    # regex pattern against it raises TypeError — confirm this was
                    # updated to response.text (or a bytes pattern) for py3.
                    result = re.search('rel="upload:default"\shref="(.*?\/descriptor.ovf)"',response.content)
                    if result:
                        transfer_href = result.group(1)
                    # NOTE(review): if the regex does not match, 'transfer_href'
                    # stays unbound and the PUT below raises NameError.

                    # push the OVF descriptor to the upload URL vCD advertised
                    response = self.perform_request(req_type='PUT',
                                                url=transfer_href,
                                                headers=headers,
                                                data=open(media_file_name, 'rb'))
                    if response.status_code != requests.codes.ok:
                        self.logger.debug(
                            "Failed create vApp template for catalog name {} and image {}".format(catalog_name,
                                                                                                  media_file_name))
                        return False

                # TODO fix this with aync block
                time.sleep(5)

                self.logger.debug("vApp template for catalog name {} and image {}".format(catalog_name, media_file_name))

                # uploading VMDK file
                # check status of OVF upload and upload remaining files.
                response = self.perform_request(req_type='GET',
                                                url=template,
                                                headers=headers)

                if response.status_code == requests.codes.ok:
                    # NOTE(review): same bytes-vs-str regex concern as above
                    result = re.search('rel="upload:default"\s*href="(.*?vmdk)"',response.content)
                    if result:
                        link_href = result.group(1)
                    # we skip ovf since it already uploaded.
                    if 'ovf' in link_href:
                        # NOTE(review): this 'continue' jumps to the NEXT catalog in
                        # the outer loop, not the next link — confirm intended.
                        continue
                    # The OVF file and VMDK must be in a same directory
                    head, tail = os.path.split(media_file_name)
                    file_vmdk = head + '/' + link_href.split("/")[-1]
                    if not os.path.isfile(file_vmdk):
                        return False
                    statinfo = os.stat(file_vmdk)
                    if statinfo.st_size == 0:
                        return False
                    hrefvmdk = link_href

                    if progress:
                        widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ',
                                       FileTransferSpeed()]
                        progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start()

                    # stream the VMDK in chunk_bytes slices using HTTP Content-Range
                    bytes_transferred = 0
                    f = open(file_vmdk, 'rb')
                    while bytes_transferred < statinfo.st_size:
                        my_bytes = f.read(chunk_bytes)
                        if len(my_bytes) <= chunk_bytes:
                            headers['Content-Range'] = 'bytes %s-%s/%s' % (
                                bytes_transferred, len(my_bytes) - 1, statinfo.st_size)
                            headers['Content-Length'] = str(len(my_bytes))
                            response = requests.put(url=hrefvmdk,
                                                     headers=headers,
                                                     data=my_bytes,
                                                     verify=False)
                            if response.status_code == requests.codes.ok:
                                bytes_transferred += len(my_bytes)
                                if progress:
                                    progress_bar.update(bytes_transferred)
                            else:
                                self.logger.debug(
                                    'file upload failed with error: [%s] %s' % (response.status_code,
                                                                                    response.content))

                                f.close()
                                return False
                    f.close()
                    if progress:
                        progress_bar.finish()
                        time.sleep(10)
                return True
            else:
                self.logger.debug("Failed retrieve vApp template for catalog name {} for OVF {}".
                                  format(catalog_name, media_file_name))
                return False
    except Exception as exp:
        self.logger.debug("Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
            .format(catalog_name,media_file_name, exp))
        raise vimconn.vimconnException(
            "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
            .format(catalog_name,media_file_name, exp))

    # reached only when no catalog in the list matched catalog_name
    self.logger.debug("Failed retrieve catalog name {} for OVF file {}".format(catalog_name, media_file_name))
    return False
-
-    def upload_vimimage(self, vca=None, catalog_name=None, media_name=None, medial_file_name=None, progress=False):
-        """Upload media file"""
-        # TODO add named parameters for readability
-
-        return self.upload_ovf(vca=vca, catalog_name=catalog_name, image_name=media_name.split(".")[0],
-                               media_file_name=medial_file_name, description='medial_file_name', progress=progress)
-
-    def validate_uuid4(self, uuid_string=None):
-        """  Method validates the correct format of a UUID.
-
-        Return: true if the string represents a valid uuid
-        """
-        try:
-            val = uuid.UUID(uuid_string, version=4)
-        except ValueError:
-            return False
-        return True
-
-    def get_catalogid(self, catalog_name=None, catalogs=None):
-        """  Method checks the catalog and returns the catalog ID in UUID format.
-
-        Args
-            catalog_name: catalog name as string
-            catalogs:  list of catalogs.
-
-        Return: catalogs uuid
-        """
-
-        for catalog in catalogs:
-            if catalog['name'] == catalog_name:
-                catalog_id = catalog['id']
-                return catalog_id
-        return None
-
-    def get_catalogbyid(self, catalog_uuid=None, catalogs=None):
-        """  Method checks the catalog and returns the catalog name; lookup is done by catalog UUID.
-
-        Args
-            catalog_uuid: catalog UUID as string
-            catalogs:  list of catalogs.
-
-        Return: catalog name or None
-        """
-
-        if not self.validate_uuid4(uuid_string=catalog_uuid):
-            return None
-
-        for catalog in catalogs:
-            catalog_id = catalog.get('id')
-            if catalog_id == catalog_uuid:
-                return catalog.get('name')
-        return None
-
-    def get_catalog_obj(self, catalog_uuid=None, catalogs=None):
-        """  Method checks the catalog and returns the catalog object; lookup is done by catalog UUID.
-
-        Args
-            catalog_uuid: catalog UUID as string
-            catalogs:  list of catalogs.
-
-        Return: catalog object or None
-        """
-
-        if not self.validate_uuid4(uuid_string=catalog_uuid):
-            return None
-
-        for catalog in catalogs:
-            catalog_id = catalog.get('id')
-            if catalog_id == catalog_uuid:
-                return catalog
-        return None
-
-    def get_image_id_from_path(self, path=None, progress=False):
-        """  Method upload OVF image to vCloud director.
-
-        Each OVF image is represented as a single catalog entry in vCloud director.
-        The method checks for an existing catalog entry.  The check is done by file name without file extension.
-
-        If the given catalog name is already present, the method will respond with the existing catalog uuid;
-        otherwise it will create a new catalog entry and upload the OVF file to the newly created catalog.
-
-        If the method can't create a catalog entry or upload a file it will throw an exception.
-
-        The method accepts a boolean flag progress that will output a progress bar. It is a useful method
-        for the standalone upload use case, e.g. to test large file uploads.
-
-        Args
-            path: - valid path to OVF file.
-            progress - boolean progress bar show progress bar.
-
-        Return: if image uploaded correct method will provide image catalog UUID.
-        """
-
-        if not path:
-            raise vimconn.vimconnException("Image path can't be None.")
-
-        if not os.path.isfile(path):
-            raise vimconn.vimconnException("Can't read file. File not found.")
-
-        if not os.access(path, os.R_OK):
-            raise vimconn.vimconnException("Can't read file. Check file permission to read.")
-
-        self.logger.debug("get_image_id_from_path() client requesting {} ".format(path))
-
-        dirpath, filename = os.path.split(path)
-        flname, file_extension = os.path.splitext(path)
-        if file_extension != '.ovf':
-            self.logger.debug("Wrong file extension {} connector support only OVF container.".format(file_extension))
-            raise vimconn.vimconnException("Wrong container.  vCloud director supports only OVF.")
-
-        catalog_name = os.path.splitext(filename)[0]
-        catalog_md5_name = hashlib.md5(path).hexdigest()
-        self.logger.debug("File name {} Catalog Name {} file path {} "
-                          "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name))
-
-        try:
-            org,vdc = self.get_vdc_details()
-            catalogs = org.list_catalogs()
-        except Exception as exp:
-            self.logger.debug("Failed get catalogs() with Exception {} ".format(exp))
-            raise vimconn.vimconnException("Failed get catalogs() with Exception {} ".format(exp))
-
-        if len(catalogs) == 0:
-            self.logger.info("Creating a new catalog entry {} in vcloud director".format(catalog_name))
-            if self.create_vimcatalog(org, catalog_md5_name) is None:
-                raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
-
-            result = self.upload_vimimage(vca=org, catalog_name=catalog_md5_name,
-                                          media_name=filename, medial_file_name=path, progress=progress)
-            if not result:
-                raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_name))
-            return self.get_catalogid(catalog_name, catalogs)
-        else:
-            for catalog in catalogs:
-                # search for existing catalog if we find same name we return ID
-                # TODO optimize this
-                if catalog['name'] == catalog_md5_name:
-                    self.logger.debug("Found existing catalog entry for {} "
-                                      "catalog id {}".format(catalog_name,
-                                                             self.get_catalogid(catalog_md5_name, catalogs)))
-                    return self.get_catalogid(catalog_md5_name, catalogs)
-
-        # if we didn't find existing catalog we create a new one and upload image.
-        self.logger.debug("Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name))
-        if self.create_vimcatalog(org, catalog_md5_name) is None:
-            raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
-
-        result = self.upload_vimimage(vca=org, catalog_name=catalog_md5_name,
-                                      media_name=filename, medial_file_name=path, progress=progress)
-        if not result:
-            raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_md5_name))
-
-        return self.get_catalogid(catalog_md5_name, org.list_catalogs())
-
-    def get_image_list(self, filter_dict={}):
-        '''Obtain tenant images from VIM
-        Filter_dict can be:
-            name: image name
-            id: image uuid
-            checksum: image checksum
-            location: image path
-        Returns the image list of dictionaries:
-            [{<the fields at Filter_dict plus some VIM specific>}, ...]
-            List can be empty
-        '''
-
-        try:
-            org, vdc = self.get_vdc_details()
-            image_list = []
-            catalogs = org.list_catalogs()
-            if len(catalogs) == 0:
-                return image_list
-            else:
-                for catalog in catalogs:
-                    catalog_uuid = catalog.get('id')
-                    name = catalog.get('name')
-                    filtered_dict = {}
-                    if filter_dict.get("name") and filter_dict["name"] != name:
-                        continue
-                    if filter_dict.get("id") and filter_dict["id"] != catalog_uuid:
-                        continue
-                    filtered_dict ["name"] = name
-                    filtered_dict ["id"] = catalog_uuid
-                    image_list.append(filtered_dict)
-
-                self.logger.debug("List of already created catalog items: {}".format(image_list))
-                return image_list
-        except Exception as exp:
-            raise vimconn.vimconnException("Exception occured while retriving catalog items {}".format(exp))
-
-    def get_vappid(self, vdc=None, vapp_name=None):
-        """ Method takes vdc object and vApp name and returns vapp uuid or None
-
-        Args:
-            vdc: The VDC object.
-            vapp_name: is application vapp name identifier
-
-        Returns:
-                The vApp UUID if found, otherwise None
-        """
-        if vdc is None or vapp_name is None:
-            return None
-        # UUID has following format https://host/api/vApp/vapp-30da58a3-e7c7-4d09-8f68-d4c8201169cf
-        try:
-            refs = filter(lambda ref: ref.name == vapp_name and ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
-                          vdc.ResourceEntities.ResourceEntity)
-            #For python3
-            #refs = [ref for ref in vdc.ResourceEntities.ResourceEntity\
-            #         if ref.name == vapp_name and ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml']
-            if len(refs) == 1:
-                return refs[0].href.split("vapp")[1][1:]
-        except Exception as e:
-            self.logger.exception(e)
-            return False
-        return None
-
-    def check_vapp(self, vdc=None, vapp_uuid=None):
-        """ Method returns True or False if the vApp is deployed in vCloud director
-
-            Args:
-                vca: Connector to VCA
-                vdc: The VDC object.
-                vappid: vappid is application identifier
-
-            Returns:
-                True if the vApp is deployed
-                :param vdc:
-                :param vapp_uuid:
-        """
-        try:
-            refs = filter(lambda ref:
-                          ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
-                          vdc.ResourceEntities.ResourceEntity)
-            #For python3
-            #refs = [ref for ref in vdc.ResourceEntities.ResourceEntity\
-            #         if ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml']
-            for ref in refs:
-                vappid = ref.href.split("vapp")[1][1:]
-                # find vapp with respected vapp uuid
-                if vappid == vapp_uuid:
-                    return True
-        except Exception as e:
-            self.logger.exception(e)
-            return False
-        return False
-
-    def get_namebyvappid(self, vapp_uuid=None):
-        """Method returns vApp name from vCD and lookup done by vapp_id.
-
-        Args:
-            vapp_uuid: vappid is application identifier
-
-        Returns:
-            The return vApp name otherwise None
-        """
-        try:
-            if self.client and vapp_uuid:
-                vapp_call = "{}/api/vApp/vapp-{}".format(self.url, vapp_uuid)
-                headers = {'Accept':'application/*+xml;version=' + API_VERSION,
-                     'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
-
-                response = self.perform_request(req_type='GET',
-                                                url=vapp_call,
-                                                headers=headers)
-                #Retry login if session expired & retry sending request
-                if response.status_code == 403:
-                    response = self.retry_rest('GET', vapp_call)
-
-                tree = XmlElementTree.fromstring(response.content)
-                return tree.attrib['name']
-        except Exception as e:
-            self.logger.exception(e)
-            return None
-        return None
-
-    def new_vminstance(self, name=None, description="", start=False, image_id=None, flavor_id=None, net_list=[],
-                       cloud_config=None, disk_list=None, availability_zone_index=None, availability_zone_list=None):
-        """Adds a VM instance to VIM
-        Params:
-            'start': (boolean) indicates if VM must start or created in pause mode.
-            'image_id','flavor_id': image and flavor VIM id to use for the VM
-            'net_list': list of interfaces, each one is a dictionary with:
-                'name': (optional) name for the interface.
-                'net_id': VIM network id where this interface must be connect to. Mandatory for type==virtual
-                'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities
-                'model': (optional and only have sense for type==virtual) interface model: virtio, e1000, ...
-                'mac_address': (optional) mac address to assign to this interface
-                #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type if VF and net_id is not provided,
-                    the VLAN tag to be used. In case net_id is provided, the internal network vlan is used for tagging VF
-                'type': (mandatory) can be one of:
-                    'virtual', in this case always connected to a network of type 'net_type=bridge'
-                     'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp network ot it
-                           can created unconnected
-                     'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
-                     'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
-                            are allocated on the same physical NIC
-                'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
-                'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing
-                                or True, it must apply the default VIM behaviour
-                After execution the method will add the key:
-                'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this
-                        interface. 'net_list' is modified
-            'cloud_config': (optional) dictionary with:
-                'key-pairs': (optional) list of strings with the public key to be inserted to the default user
-                'users': (optional) list of users to be inserted, each item is a dict with:
-                    'name': (mandatory) user name,
-                    'key-pairs': (optional) list of strings with the public key to be inserted to the user
-                'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
-                    or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
-                'config-files': (optional). List of files to be transferred. Each item is a dict with:
-                    'dest': (mandatory) string with the destination absolute path
-                    'encoding': (optional, by default text). Can be one of:
-                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
-                    'content' (mandatory): string with the content of the file
-                    'permissions': (optional) string with file permissions, typically octal notation '0644'
-                    'owner': (optional) file owner, string with the format 'owner:group'
-                'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
-            'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
-                'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
-                'size': (mandatory) string with the size of the disk in GB
-            availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
-            availability_zone_list: list of availability zones given by user in the VNFD descriptor.  Ignore if
-                availability_zone_index is None
-        Returns a tuple with the instance identifier and created_items or raises an exception on error
-            created_items can be None or a dictionary where this method can include key-values that will be passed to
-            the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
-            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
-            as not present.
-        """
-        self.logger.info("Creating new instance for entry {}".format(name))
-        self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {} "\
-                          "availability_zone_index {} availability_zone_list {}"\
-                          .format(description, start, image_id, flavor_id, net_list, cloud_config, disk_list,\
-                                  availability_zone_index, availability_zone_list))
-
-        #new vm name = vmname + tenant_id + uuid
-        new_vm_name = [name, '-', str(uuid.uuid4())]
-        vmname_andid = ''.join(new_vm_name)
-
-        for net in net_list:
-            if net['type'] == "PCI-PASSTHROUGH":
-                raise vimconn.vimconnNotSupportedException(
-                      "Current vCD version does not support type : {}".format(net['type']))
-
-        if len(net_list) > 10:
-            raise vimconn.vimconnNotSupportedException(
-                      "The VM hardware versions 7 and above support upto 10 NICs only")
-
-        # if vm already deployed we return existing uuid
-        # we check for presence of VDC, Catalog entry and Flavor.
-        org, vdc = self.get_vdc_details()
-        if vdc is None:
-            raise vimconn.vimconnNotFoundException(
-                "new_vminstance(): Failed create vApp {}: (Failed retrieve VDC information)".format(name))
-        catalogs = org.list_catalogs()
-        if catalogs is None:
-            #Retry once, if failed by refreshing token
-            self.get_token()
-            org = Org(self.client, resource=self.client.get_org())
-            catalogs = org.list_catalogs()
-        if catalogs is None:
-            raise vimconn.vimconnNotFoundException(
-                "new_vminstance(): Failed create vApp {}: (Failed retrieve catalogs list)".format(name))
-
-        catalog_hash_name = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
-        if catalog_hash_name:
-            self.logger.info("Found catalog entry {} for image id {}".format(catalog_hash_name, image_id))
-        else:
-            raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
-                                                   "(Failed retrieve catalog information {})".format(name, image_id))
-
-        # Set vCPU and Memory based on flavor.
-        vm_cpus = None
-        vm_memory = None
-        vm_disk = None
-        numas = None
-
-        if flavor_id is not None:
-            if flavor_id not in vimconnector.flavorlist:
-                raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
-                                                       "Failed retrieve flavor information "
-                                                       "flavor id {}".format(name, flavor_id))
-            else:
-                try:
-                    flavor = vimconnector.flavorlist[flavor_id]
-                    vm_cpus = flavor[FLAVOR_VCPUS_KEY]
-                    vm_memory = flavor[FLAVOR_RAM_KEY]
-                    vm_disk = flavor[FLAVOR_DISK_KEY]
-                    extended = flavor.get("extended", None)
-                    if extended:
-                        numas=extended.get("numas", None)
-
-                except Exception as exp:
-                    raise vimconn.vimconnException("Corrupted flavor. {}.Exception: {}".format(flavor_id, exp))
-
-        # image upload creates template name as catalog name space Template.
-        templateName = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
-        power_on = 'false'
-        if start:
-            power_on = 'true'
-
-        # client must provide at least one entry in net_list if not we report error
-        #If net type is mgmt, then configure it as primary net & use its NIC index as primary NIC
-        #If no mgmt, then the 1st net in net_list is considered as primary net. 
-        primary_net = None
-        primary_netname = None
-        primary_net_href = None
-        network_mode = 'bridged'
-        if net_list is not None and len(net_list) > 0:
-            for net in net_list:
-                if 'use' in net and net['use'] == 'mgmt' and not primary_net:
-                    primary_net = net
-            if primary_net is None:
-                primary_net = net_list[0]
-
-            try:
-                primary_net_id = primary_net['net_id']
-                url_list = [self.url, '/api/network/', primary_net_id]
-                primary_net_href = ''.join(url_list) 
-                network_dict = self.get_vcd_network(network_uuid=primary_net_id)
-                if 'name' in network_dict:
-                    primary_netname = network_dict['name']
-
-            except KeyError:
-                raise vimconn.vimconnException("Corrupted flavor. {}".format(primary_net))
-        else:
-            raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed network list is empty.".format(name))
-
-        # use: 'data', 'bridge', 'mgmt'
-        # create vApp.  Set vcpu and ram based on flavor id.
-        try:
-            vdc_obj = VDC(self.client, resource=org.get_vdc(self.tenant_name))
-            if not vdc_obj:
-                raise vimconn.vimconnNotFoundException("new_vminstance(): Failed to get VDC object")
-
-            for retry in (1,2):
-                items = org.get_catalog_item(catalog_hash_name, catalog_hash_name)
-                catalog_items = [items.attrib]
-
-                if len(catalog_items) == 1:
-                    if self.client:
-                        headers = {'Accept':'application/*+xml;version=' + API_VERSION,
-                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
-
-                    response = self.perform_request(req_type='GET',
-                                                url=catalog_items[0].get('href'),
-                                                headers=headers)
-                    catalogItem = XmlElementTree.fromstring(response.content)
-                    entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
-                    vapp_tempalte_href = entity.get("href")
-
-                response = self.perform_request(req_type='GET',
-                                                    url=vapp_tempalte_href,
-                                                    headers=headers)
-                if response.status_code != requests.codes.ok:
-                    self.logger.debug("REST API call {} failed. Return status code {}".format(vapp_tempalte_href,
-                                                                                           response.status_code))
-                else:
-                    result = (response.content).replace("\n"," ")
-
-                vapp_template_tree = XmlElementTree.fromstring(response.content)
-                children_element = [child for child in vapp_template_tree if 'Children' in child.tag][0]
-                vm_element = [child for child in children_element if 'Vm' in child.tag][0]
-                vm_name = vm_element.get('name')
-                vm_id = vm_element.get('id')
-                vm_href = vm_element.get('href')
-
-                cpus = re.search('<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
-                memory_mb = re.search('<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
-                cores = re.search('<vmw:CoresPerSocket ovf:required.*?>(\d+)</vmw:CoresPerSocket>',result).group(1)
-
-                headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml'
-                vdc_id = vdc.get('id').split(':')[-1]
-                instantiate_vapp_href = "{}/api/vdc/{}/action/instantiateVAppTemplate".format(self.url,
-                                                                                                vdc_id)
-                data = """<?xml version="1.0" encoding="UTF-8"?>
-                <InstantiateVAppTemplateParams
-                xmlns="http://www.vmware.com/vcloud/v1.5"
-                name="{}"
-                deploy="false"
-                powerOn="false"
-                xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-                xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1">
-                <Description>Vapp instantiation</Description>
-                <InstantiationParams>
-                     <NetworkConfigSection>
-                         <ovf:Info>Configuration parameters for logical networks</ovf:Info>
-                         <NetworkConfig networkName="{}">
-                             <Configuration>
-                                 <ParentNetwork href="{}" />
-                                 <FenceMode>bridged</FenceMode>
-                             </Configuration>
-                         </NetworkConfig>
-                     </NetworkConfigSection>
-                <LeaseSettingsSection
-                type="application/vnd.vmware.vcloud.leaseSettingsSection+xml">
-                <ovf:Info>Lease Settings</ovf:Info>
-                <StorageLeaseInSeconds>172800</StorageLeaseInSeconds>
-                <StorageLeaseExpiration>2014-04-25T08:08:16.438-07:00</StorageLeaseExpiration>
-                </LeaseSettingsSection>
-                </InstantiationParams>
-                <Source href="{}"/>
-                <SourcedItem>
-                <Source href="{}" id="{}" name="{}"
-                type="application/vnd.vmware.vcloud.vm+xml"/>
-                <VmGeneralParams>
-                    <NeedsCustomization>false</NeedsCustomization>
-                </VmGeneralParams>
-                <InstantiationParams>
-                      <NetworkConnectionSection>
-                      <ovf:Info>Specifies the available VM network connections</ovf:Info>
-                      <NetworkConnection network="{}">
-                      <NetworkConnectionIndex>0</NetworkConnectionIndex>
-                      <IsConnected>true</IsConnected>
-                      <IpAddressAllocationMode>DHCP</IpAddressAllocationMode>
-                      </NetworkConnection>
-                      </NetworkConnectionSection><ovf:VirtualHardwareSection>
-                      <ovf:Info>Virtual hardware requirements</ovf:Info>
-                      <ovf:Item xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
-                      xmlns:vmw="http://www.vmware.com/schema/ovf">
-                      <rasd:AllocationUnits>hertz * 10^6</rasd:AllocationUnits>
-                      <rasd:Description>Number of Virtual CPUs</rasd:Description>
-                      <rasd:ElementName xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="str">{cpu} virtual CPU(s)</rasd:ElementName>
-                      <rasd:InstanceID>4</rasd:InstanceID>
-                      <rasd:Reservation>0</rasd:Reservation>
-                      <rasd:ResourceType>3</rasd:ResourceType>
-                      <rasd:VirtualQuantity xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="int">{cpu}</rasd:VirtualQuantity>
-                      <rasd:Weight>0</rasd:Weight>
-                      <vmw:CoresPerSocket ovf:required="false">{core}</vmw:CoresPerSocket>
-                      </ovf:Item><ovf:Item xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData">
-                      <rasd:AllocationUnits>byte * 2^20</rasd:AllocationUnits>
-                      <rasd:Description>Memory Size</rasd:Description>
-                      <rasd:ElementName xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="str">{memory} MB of memory</rasd:ElementName>
-                      <rasd:InstanceID>5</rasd:InstanceID>
-                      <rasd:Reservation>0</rasd:Reservation>
-                      <rasd:ResourceType>4</rasd:ResourceType>
-                      <rasd:VirtualQuantity xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="int">{memory}</rasd:VirtualQuantity>
-                      <rasd:Weight>0</rasd:Weight>
-                      </ovf:Item>
-                </ovf:VirtualHardwareSection>
-                </InstantiationParams>
-                </SourcedItem>
-                <AllEULAsAccepted>false</AllEULAsAccepted>
-                </InstantiateVAppTemplateParams>""".format(vmname_andid,
-                                                        primary_netname,
-                                                        primary_net_href,
-                                                     vapp_tempalte_href,
-                                                                vm_href,
-                                                                  vm_id,
-                                                                vm_name,
-                                                        primary_netname,
-                                                               cpu=cpus,
-                                                             core=cores,
-                                                       memory=memory_mb)
-
-                response = self.perform_request(req_type='POST',
-                                                url=instantiate_vapp_href,
-                                                headers=headers,
-                                                data=data)
-
-                if response.status_code != 201:
-                    self.logger.error("REST call {} failed reason : {}"\
-                         "status code : {}".format(instantiate_vapp_href,
-                                                        response.content,
-                                                   response.status_code))
-                    raise vimconn.vimconnException("new_vminstance(): Failed to create"\
-                                                        "vAapp {}".format(vmname_andid))
-                else:
-                    vapptask = self.get_task_from_response(response.content)
-
-                if vapptask is None and retry==1:
-                    self.get_token() # Retry getting token
-                    continue
-                else:
-                    break
-
-            if vapptask is None or vapptask is False:
-                raise vimconn.vimconnUnexpectedResponse(
-                    "new_vminstance(): failed to create vApp {}".format(vmname_andid))
-
-            # wait for task to complete
-            result = self.client.get_task_monitor().wait_for_success(task=vapptask)
-
-            if result.get('status') == 'success':
-                self.logger.debug("new_vminstance(): Sucessfully created Vapp {}".format(vmname_andid))
-            else:
-                raise vimconn.vimconnUnexpectedResponse(
-                    "new_vminstance(): failed to create vApp {}".format(vmname_andid))
-
-        except Exception as exp:
-            raise vimconn.vimconnUnexpectedResponse(
-                "new_vminstance(): failed to create vApp {} with Exception:{}".format(vmname_andid, exp))
-
-        # we should have now vapp in undeployed state.
-        try:
-            vdc_obj = VDC(self.client, href=vdc.get('href'))
-            vapp_resource = vdc_obj.get_vapp(vmname_andid)
-            vapp_uuid = vapp_resource.get('id').split(':')[-1]
-            vapp = VApp(self.client, resource=vapp_resource)
-
-        except Exception as exp:
-            raise vimconn.vimconnUnexpectedResponse(
-                    "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
-                    .format(vmname_andid, exp))
-
-        if vapp_uuid is None:
-            raise vimconn.vimconnUnexpectedResponse(
-                "new_vminstance(): Failed to retrieve vApp {} after creation".format(
-                                                                            vmname_andid))
-
-        #Add PCI passthrough/SRIOV configrations
-        vm_obj = None
-        pci_devices_info = []
-        reserve_memory = False
-
-        for net in net_list:
-            if net["type"] == "PF" or net["type"] == "PCI-PASSTHROUGH":
-                pci_devices_info.append(net)
-            elif (net["type"] == "VF" or net["type"] == "SR-IOV" or net["type"] == "VFnotShared") and 'net_id'in net:
-                reserve_memory = True
-
-        #Add PCI
-        if len(pci_devices_info) > 0:
-            self.logger.info("Need to add PCI devices {} into VM {}".format(pci_devices_info,
-                                                                        vmname_andid ))
-            PCI_devices_status, vm_obj, vcenter_conect = self.add_pci_devices(vapp_uuid,
-                                                                            pci_devices_info,
-                                                                            vmname_andid)
-            if PCI_devices_status:
-                self.logger.info("Added PCI devives {} to VM {}".format(
-                                                            pci_devices_info,
-                                                            vmname_andid)
-                                 )
-                reserve_memory = True
-            else:
-                self.logger.info("Fail to add PCI devives {} to VM {}".format(
-                                                            pci_devices_info,
-                                                            vmname_andid)
-                                 )
-
-        # Modify vm disk
-        if vm_disk:
-            #Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled
-            result = self.modify_vm_disk(vapp_uuid, vm_disk)
-            if result :
-                self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))
-
-        #Add new or existing disks to vApp
-        if disk_list:
-            added_existing_disk = False
-            for disk in disk_list:
-                if 'device_type' in disk and disk['device_type'] == 'cdrom':
-                    image_id = disk['image_id']
-                    # Adding CD-ROM to VM
-                    # will revisit code once specification ready to support this feature
-                    self.insert_media_to_vm(vapp, image_id)
-                elif "image_id" in disk and disk["image_id"] is not None:
-                    self.logger.debug("Adding existing disk from image {} to vm {} ".format(
-                                                                    disk["image_id"] , vapp_uuid))
-                    self.add_existing_disk(catalogs=catalogs,
-                                           image_id=disk["image_id"],
-                                           size = disk["size"],
-                                           template_name=templateName,
-                                           vapp_uuid=vapp_uuid
-                                           )
-                    added_existing_disk = True
-                else:
-                    #Wait till added existing disk gets reflected into vCD database/API
-                    if added_existing_disk:
-                        time.sleep(5)
-                        added_existing_disk = False
-                    self.add_new_disk(vapp_uuid, disk['size'])
-
-        if numas:
-            # Assigning numa affinity setting
-            for numa in numas:
-                if 'paired-threads-id' in numa:
-                    paired_threads_id = numa['paired-threads-id']
-                    self.set_numa_affinity(vapp_uuid, paired_threads_id)
-
-        # add NICs & connect to networks in netlist
-        try:
-            vdc_obj = VDC(self.client, href=vdc.get('href'))
-            vapp_resource = vdc_obj.get_vapp(vmname_andid)
-            vapp = VApp(self.client, resource=vapp_resource)
-            vapp_id = vapp_resource.get('id').split(':')[-1]
-
-            self.logger.info("Removing primary NIC: ")
-            # First remove all NICs so that NIC properties can be adjusted as needed
-            self.remove_primary_network_adapter_from_all_vms(vapp)
-
-            self.logger.info("Request to connect VM to a network: {}".format(net_list))
-            primary_nic_index = 0
-            nicIndex = 0
-            for net in net_list:
-                # openmano uses network id in UUID format.
-                # vCloud Director need a name so we do reverse operation from provided UUID we lookup a name
-                # [{'use': 'bridge', 'net_id': '527d4bf7-566a-41e7-a9e7-ca3cdd9cef4f', 'type': 'virtual',
-                #   'vpci': '0000:00:11.0', 'name': 'eth0'}]
-
-                if 'net_id' not in net:
-                    continue
-
-                #Using net_id as a vim_id i.e. vim interface id, as do not have saperate vim interface id
-                #Same will be returned in refresh_vms_status() as vim_interface_id
-                net['vim_id'] = net['net_id']  # Provide the same VIM identifier as the VIM network
-
-                interface_net_id = net['net_id']
-                interface_net_name = self.get_network_name_by_id(network_uuid=interface_net_id)
-                interface_network_mode = net['use']
-
-                if interface_network_mode == 'mgmt':
-                    primary_nic_index = nicIndex
-
-                """- POOL (A static IP address is allocated automatically from a pool of addresses.)
-                                  - DHCP (The IP address is obtained from a DHCP service.)
-                                  - MANUAL (The IP address is assigned manually in the IpAddress element.)
-                                  - NONE (No IP addressing mode specified.)"""
-
-                if primary_netname is not None:
-                    self.logger.debug("new_vminstance(): Filtering by net name {}".format(interface_net_name))
-                    nets = filter(lambda n: n.get('name') == interface_net_name, self.get_network_list())
-                    #For python3
-                    #nets = [n for n in self.get_network_list() if n.get('name') == interface_net_name]
-                    if len(nets) == 1:
-                        self.logger.info("new_vminstance(): Found requested network: {}".format(nets[0].get('name')))
-
-                        if interface_net_name != primary_netname:
-                            # connect network to VM - with all DHCP by default
-                            self.logger.info("new_vminstance(): Attaching net {} to vapp".format(interface_net_name))
-                            self.connect_vapp_to_org_vdc_network(vapp_id, nets[0].get('name'))
-
-                        type_list = ('PF', 'PCI-PASSTHROUGH', 'VFnotShared')
-                        nic_type = 'VMXNET3'
-                        if 'type' in net and net['type'] not in type_list:
-                            # fetching nic type from vnf
-                            if 'model' in net:
-                                if net['model'] is not None:
-                                    if net['model'].lower() == 'paravirt' or net['model'].lower() == 'virtio':
-                                        nic_type = 'VMXNET3'
-                                else:
-                                    nic_type = net['model']
-
-                                self.logger.info("new_vminstance(): adding network adapter "\
-                                                          "to a network {}".format(nets[0].get('name')))
-                                self.add_network_adapter_to_vms(vapp, nets[0].get('name'),
-                                                                primary_nic_index,
-                                                                nicIndex,
-                                                                net,
-                                                                nic_type=nic_type)
-                            else:
-                                self.logger.info("new_vminstance(): adding network adapter "\
-                                                         "to a network {}".format(nets[0].get('name')))
-                                if net['type'] in ['SR-IOV', 'VF']:
-                                    nic_type = net['type']
-                                self.add_network_adapter_to_vms(vapp, nets[0].get('name'),
-                                                                primary_nic_index,
-                                                                nicIndex,
-                                                                net,
-                                                                nic_type=nic_type)
-                nicIndex += 1
-
-            # cloud-init for ssh-key injection
-            if cloud_config:
-                # Create a catalog which will be carrying the config drive ISO
-                # This catalog is deleted during vApp deletion. The catalog name carries
-                # vApp UUID and thats how it gets identified during its deletion.
-                config_drive_catalog_name = 'cfg_drv-' + vapp_uuid
-                self.logger.info('new_vminstance(): Creating catalog "{}" to carry config drive ISO'.format(
-                    config_drive_catalog_name))
-                config_drive_catalog_id = self.create_vimcatalog(org, config_drive_catalog_name)
-                if config_drive_catalog_id is None:
-                    error_msg = "new_vminstance(): Failed to create new catalog '{}' to carry the config drive " \
-                                "ISO".format(config_drive_catalog_name)
-                    raise Exception(error_msg)
-
-                # Create config-drive ISO
-                _, userdata = self._create_user_data(cloud_config)
-                # self.logger.debug('new_vminstance(): The userdata for cloud-init: {}'.format(userdata))
-                iso_path = self.create_config_drive_iso(userdata)
-                self.logger.debug('new_vminstance(): The ISO is successfully created. Path: {}'.format(iso_path))
-
-                self.logger.info('new_vminstance(): uploading iso to catalog {}'.format(config_drive_catalog_name))
-                self.upload_iso_to_catalog(config_drive_catalog_id, iso_path)
-                # Attach the config-drive ISO to the VM
-                self.logger.info('new_vminstance(): Attaching the config-drive ISO to the VM')
-                # The ISO remains in INVALID_STATE right after the PUT request (its a blocking call though)
-                time.sleep(5)
-                self.insert_media_to_vm(vapp, config_drive_catalog_id)
-                shutil.rmtree(os.path.dirname(iso_path), ignore_errors=True)
-
-            # If VM has PCI devices or SRIOV reserve memory for VM
-            if reserve_memory:
-                self.reserve_memory_for_all_vms(vapp, memory_mb)
-
-            self.logger.debug("new_vminstance(): starting power on vApp {} ".format(vmname_andid))
-
-            poweron_task = self.power_on_vapp(vapp_id, vmname_andid)
-            result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
-            if result.get('status') == 'success':
-                self.logger.info("new_vminstance(): Successfully power on "\
-                                             "vApp {}".format(vmname_andid))
-            else:
-                self.logger.error("new_vminstance(): failed to power on vApp "\
-                                                     "{}".format(vmname_andid))
-
-        except Exception as exp:
-            try:
-                self.delete_vminstance(vapp_uuid)
-            except Exception as exp2:
-                self.logger.error("new_vminstance rollback fail {}".format(exp2))
-            # it might be a case if specific mandatory entry in dict is empty or some other pyVcloud exception
-            self.logger.error("new_vminstance(): Failed create new vm instance {} with exception {}"
-                              .format(name, exp))
-            raise vimconn.vimconnException("new_vminstance(): Failed create new vm instance {} with exception {}"
-                                           .format(name, exp))
-
-        # check if vApp deployed and if that the case return vApp UUID otherwise -1
-        wait_time = 0
-        vapp_uuid = None
-        while wait_time <= MAX_WAIT_TIME:
-            try:
-                vapp_resource = vdc_obj.get_vapp(vmname_andid)
-                vapp = VApp(self.client, resource=vapp_resource)
-            except Exception as exp:
-                raise vimconn.vimconnUnexpectedResponse(
-                        "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
-                        .format(vmname_andid, exp))
-
-            #if vapp and vapp.me.deployed:
-            if vapp and vapp_resource.get('deployed') == 'true':
-                vapp_uuid = vapp_resource.get('id').split(':')[-1]
-                break
-            else:
-                self.logger.debug("new_vminstance(): Wait for vApp {} to deploy".format(name))
-                time.sleep(INTERVAL_TIME)
-
-            wait_time +=INTERVAL_TIME
-
-        #SET Affinity Rule for VM
-        #Pre-requisites: User has created Hosh Groups in vCenter with respective Hosts to be used
-        #While creating VIM account user has to pass the Host Group names in availability_zone list
-        #"availability_zone" is a  part of VIM "config" parameters
-        #For example, in VIM config: "availability_zone":["HG_170","HG_174","HG_175"]
-        #Host groups are referred as availability zones
-        #With following procedure, deployed VM will be added into a VM group.
-        #Then A VM to Host Affinity rule will be created using the VM group & Host group.
-        if(availability_zone_list):
-            self.logger.debug("Existing Host Groups in VIM {}".format(self.config.get('availability_zone')))
-            #Admin access required for creating Affinity rules
-            client = self.connect_as_admin()
-            if not client:
-                raise vimconn.vimconnConnectionException("Failed to connect vCD as admin")
-            else:
-                self.client = client
-            if self.client:
-                headers = {'Accept':'application/*+xml;version=27.0',
-                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
-            #Step1: Get provider vdc details from organization
-            pvdc_href = self.get_pvdc_for_org(self.tenant_name, headers)
-            if pvdc_href is not None:
-            #Step2: Found required pvdc, now get resource pool information
-                respool_href = self.get_resource_pool_details(pvdc_href, headers)
-                if respool_href is None:
-                    #Raise error if respool_href not found
-                    msg = "new_vminstance():Error in finding resource pool details in pvdc {}"\
-                           .format(pvdc_href)
-                    self.log_message(msg)
-
-            #Step3: Verify requested availability zone(hostGroup) is present in vCD
-            # get availability Zone
-            vm_az = self.get_vm_availability_zone(availability_zone_index, availability_zone_list)
-            # check if provided av zone(hostGroup) is present in vCD VIM
-            status = self.check_availibility_zone(vm_az, respool_href, headers)
-            if status is False:
-                msg = "new_vminstance(): Error in finding availability zone(Host Group): {} in "\
-                       "resource pool {} status: {}".format(vm_az,respool_href,status)
-                self.log_message(msg)
-            else:
-                self.logger.debug ("new_vminstance(): Availability zone {} found in VIM".format(vm_az))
-
-            #Step4: Find VM group references to create vm group
-            vmgrp_href = self.find_vmgroup_reference(respool_href, headers)
-            if vmgrp_href == None:
-                msg = "new_vminstance(): No reference to VmGroup found in resource pool"
-                self.log_message(msg)
-
-            #Step5: Create a VmGroup with name az_VmGroup
-            vmgrp_name = vm_az + "_" + name #Formed VM Group name = Host Group name + VM name
-            status = self.create_vmgroup(vmgrp_name, vmgrp_href, headers)
-            if status is not True:
-                msg = "new_vminstance(): Error in creating VM group {}".format(vmgrp_name)
-                self.log_message(msg)
-
-            #VM Group url to add vms to vm group
-            vmgrpname_url = self.url + "/api/admin/extension/vmGroup/name/"+ vmgrp_name
-
-            #Step6: Add VM to VM Group
-            #Find VM uuid from vapp_uuid
-            vm_details = self.get_vapp_details_rest(vapp_uuid)
-            vm_uuid = vm_details['vmuuid']
-
-            status = self.add_vm_to_vmgroup(vm_uuid, vmgrpname_url, vmgrp_name, headers)
-            if status is not True:
-                msg = "new_vminstance(): Error in adding VM to VM group {}".format(vmgrp_name)
-                self.log_message(msg)
-
-            #Step7: Create VM to Host affinity rule
-            addrule_href = self.get_add_rule_reference (respool_href, headers)
-            if addrule_href is None:
-                msg = "new_vminstance(): Error in finding href to add rule in resource pool: {}"\
-                      .format(respool_href)
-                self.log_message(msg)
-
-            status = self.create_vm_to_host_affinity_rule(addrule_href, vmgrp_name, vm_az, "Affinity",  headers)
-            if status is False:
-                msg = "new_vminstance(): Error in creating affinity rule for VM {} in Host group {}"\
-                      .format(name, vm_az)
-                self.log_message(msg)
-            else:
-                self.logger.debug("new_vminstance(): Affinity rule created successfully. Added {} in Host group {}"\
-                                    .format(name, vm_az))
-            #Reset token to a normal user to perform other operations
-            self.get_token()
-
-        if vapp_uuid is not None:
-            return vapp_uuid, None
-        else:
-            raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed create new vm instance {}".format(name))
-
-    def create_config_drive_iso(self, user_data):
-        tmpdir = tempfile.mkdtemp()
-        iso_path = os.path.join(tmpdir, 'ConfigDrive.iso')
-        latest_dir = os.path.join(tmpdir, 'openstack', 'latest')
-        os.makedirs(latest_dir)
-        with open(os.path.join(latest_dir, 'meta_data.json'), 'w') as meta_file_obj, \
-                open(os.path.join(latest_dir, 'user_data'), 'w') as userdata_file_obj:
-            userdata_file_obj.write(user_data)
-            meta_file_obj.write(json.dumps({"availability_zone": "nova",
-                                            "launch_index": 0,
-                                            "name": "ConfigDrive",
-                                            "uuid": str(uuid.uuid4())}
-                                           )
-                                )
-        genisoimage_cmd = 'genisoimage -J -r -V config-2 -o {iso_path} {source_dir_path}'.format(
-            iso_path=iso_path, source_dir_path=tmpdir)
-        self.logger.info('create_config_drive_iso(): Creating ISO by running command "{}"'.format(genisoimage_cmd))
-        try:
-            FNULL = open(os.devnull, 'w')
-            subprocess.check_call(genisoimage_cmd, shell=True, stdout=FNULL)
-        except subprocess.CalledProcessError as e:
-            shutil.rmtree(tmpdir, ignore_errors=True)
-            error_msg = 'create_config_drive_iso(): Exception while running genisoimage command: {}'.format(e)
-            self.logger.error(error_msg)
-            raise Exception(error_msg)
-        return iso_path
-
-    def upload_iso_to_catalog(self, catalog_id, iso_file_path):
-        if not os.path.isfile(iso_file_path):
-            error_msg = "upload_iso_to_catalog(): Given iso file is not present. Given path: {}".format(iso_file_path)
-            self.logger.error(error_msg)
-            raise Exception(error_msg)
-        iso_file_stat = os.stat(iso_file_path)
-        xml_media_elem = '''<?xml version="1.0" encoding="UTF-8"?>
-                            <Media
-                                xmlns="http://www.vmware.com/vcloud/v1.5"
-                                name="{iso_name}"
-                                size="{iso_size}"
-                                imageType="iso">
-                                <Description>ISO image for config-drive</Description>
-                            </Media>'''.format(iso_name=os.path.basename(iso_file_path), iso_size=iso_file_stat.st_size)
-        headers = {'Accept':'application/*+xml;version=' + API_VERSION,
-                   'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
-        headers['Content-Type'] = 'application/vnd.vmware.vcloud.media+xml'
-        catalog_href = self.url + '/api/catalog/' + catalog_id + '/action/upload'
-        response = self.perform_request(req_type='POST', url=catalog_href, headers=headers, data=xml_media_elem)
-
-        if response.status_code != 201:
-            error_msg = "upload_iso_to_catalog(): Failed to POST an action/upload request to {}".format(catalog_href)
-            self.logger.error(error_msg)
-            raise Exception(error_msg)
-
-        catalogItem = XmlElementTree.fromstring(response.content)
-        entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.media+xml"][0]
-        entity_href = entity.get('href')
-
-        response = self.perform_request(req_type='GET', url=entity_href, headers=headers)
-        if response.status_code != 200:
-            raise Exception("upload_iso_to_catalog(): Failed to GET entity href {}".format(entity_href))
-
-        match = re.search(r'<Files>\s+?<File.+?href="(.+?)"/>\s+?</File>\s+?</Files>', response.text, re.DOTALL)
-        if match:
-            media_upload_href = match.group(1)
-        else:
-            raise Exception('Could not parse the upload URL for the media file from the last response')
-        upload_iso_task = self.get_task_from_response(response.content)
-        headers['Content-Type'] = 'application/octet-stream'
-        response = self.perform_request(req_type='PUT',
-                                        url=media_upload_href,
-                                        headers=headers,
-                                        data=open(iso_file_path, 'rb'))
-
-        if response.status_code != 200:
-            raise Exception('PUT request to "{}" failed'.format(media_upload_href))
-        result = self.client.get_task_monitor().wait_for_success(task=upload_iso_task)
-        if result.get('status') != 'success':
-            raise Exception('The upload iso task failed with status {}'.format(result.get('status')))
-
-    def get_vcd_availibility_zones(self,respool_href, headers):
-        """ Method to find presence of av zone is VIM resource pool
-
-            Args:
-                respool_href - resource pool href
-                headers - header information
-
-            Returns:
-               vcd_az - list of azone present in vCD
-        """
-        vcd_az = []
-        url=respool_href
-        resp = self.perform_request(req_type='GET',url=respool_href, headers=headers)
-
-        if resp.status_code != requests.codes.ok:
-            self.logger.debug ("REST API call {} failed. Return status code {}".format(url, resp.status_code))
-        else:
-        #Get the href to hostGroups and find provided hostGroup is present in it
-            resp_xml = XmlElementTree.fromstring(resp.content)
-            for child in resp_xml:
-                if 'VMWProviderVdcResourcePool' in child.tag:
-                    for schild in child:
-                        if 'Link' in schild.tag:
-                            if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwHostGroupsType+xml":
-                                hostGroup = schild.attrib.get('href')
-                                hg_resp = self.perform_request(req_type='GET',url=hostGroup, headers=headers)
-                                if hg_resp.status_code != requests.codes.ok:
-                                    self.logger.debug ("REST API call {} failed. Return status code {}".format(hostGroup, hg_resp.status_code))
-                                else:
-                                    hg_resp_xml =  XmlElementTree.fromstring(hg_resp.content)
-                                    for hostGroup in hg_resp_xml:
-                                        if 'HostGroup' in hostGroup.tag:
-                                            #append host group name to the list
-                                            vcd_az.append(hostGroup.attrib.get("name"))
-        return vcd_az
-
-
-    def set_availability_zones(self):
-        """
-        Set vim availability zone
-        """
-
-        vim_availability_zones = None
-        availability_zone = None
-        if 'availability_zone' in self.config:
-            vim_availability_zones = self.config.get('availability_zone')
-        if isinstance(vim_availability_zones, str):
-            availability_zone = [vim_availability_zones]
-        elif isinstance(vim_availability_zones, list):
-            availability_zone = vim_availability_zones
-        else:
-            return availability_zone
-
-        return availability_zone
-
-
-    def get_vm_availability_zone(self, availability_zone_index, availability_zone_list):
-        """
-        Return the availability zone to be used by the created VM.
-        returns: The VIM availability zone to be used or None
-        """
-        if availability_zone_index is None:
-            if not self.config.get('availability_zone'):
-                return None
-            elif isinstance(self.config.get('availability_zone'), str):
-                return self.config['availability_zone']
-            else:
-                return self.config['availability_zone'][0]
-
-        vim_availability_zones = self.availability_zone
-
-        # check if VIM offer enough availability zones describe in the VNFD
-        if vim_availability_zones and len(availability_zone_list) <= len(vim_availability_zones):
-            # check if all the names of NFV AV match VIM AV names
-            match_by_index = False
-            for av in availability_zone_list:
-                if av not in vim_availability_zones:
-                    match_by_index = True
-                    break
-            if match_by_index:
-                self.logger.debug("Required Availability zone or Host Group not found in VIM config")
-                self.logger.debug("Input Availability zone list: {}".format(availability_zone_list))
-                self.logger.debug("VIM configured Availability zones: {}".format(vim_availability_zones))
-                self.logger.debug("VIM Availability zones will be used by index")
-                return vim_availability_zones[availability_zone_index]
-            else:
-                return availability_zone_list[availability_zone_index]
-        else:
-            raise vimconn.vimconnConflictException("No enough availability zones at VIM for this deployment")
-
-
-    def create_vm_to_host_affinity_rule(self, addrule_href, vmgrpname, hostgrpname, polarity, headers):
-        """ Method to create VM to Host Affinity rule in vCD
-
-        Args:
-            addrule_href - href to make a POST request
-            vmgrpname - name of the VM group created
-            hostgrpname - name of the host group created earlier
-            polarity - Affinity or Anti-affinity (default: Affinity)
-            headers - headers to make REST call
-
-        Returns:
-            True- if rule is created
-            False- Failed to create rule due to some error
-
-        Raises:
-            vimconn.vimconnUnexpectedResponse - if the POST was accepted but no
-            task could be extracted from the response, or the task did not
-            finish with status 'success'
-        """
-        task_status = False
-        # Rule name is derived from polarity + VM group name, e.g. "Affinity_<vmgrpname>"
-        rule_name = polarity + "_" + vmgrpname
-        # Hand-built XML payload; the vCD admin-extension API expects the
-        # vmext/vcloud namespaces and this exact element order.
-        payload = """<?xml version="1.0" encoding="UTF-8"?>
-                     <vmext:VMWVmHostAffinityRule
-                       xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
-                       xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
-                       type="application/vnd.vmware.admin.vmwVmHostAffinityRule+xml">
-                       <vcloud:Name>{}</vcloud:Name>
-                       <vcloud:IsEnabled>true</vcloud:IsEnabled>
-                       <vcloud:IsMandatory>true</vcloud:IsMandatory>
-                       <vcloud:Polarity>{}</vcloud:Polarity>
-                       <vmext:HostGroupName>{}</vmext:HostGroupName>
-                       <vmext:VmGroupName>{}</vmext:VmGroupName>
-                     </vmext:VMWVmHostAffinityRule>""".format(rule_name, polarity, hostgrpname, vmgrpname)
-
-        resp = self.perform_request(req_type='POST',url=addrule_href, headers=headers, data=payload)
-
-        # Rule creation is asynchronous: vCD answers 202 Accepted and returns a task.
-        if resp.status_code != requests.codes.accepted:
-            self.logger.debug ("REST API call {} failed. Return status code {}".format(addrule_href, resp.status_code))
-            task_status = False
-            return task_status
-        else:
-            affinity_task = self.get_task_from_response(resp.content)
-            self.logger.debug ("affinity_task: {}".format(affinity_task))
-            if affinity_task is None or affinity_task is False:
-                raise vimconn.vimconnUnexpectedResponse("failed to find affinity task")
-            # wait for task to complete
-            result = self.client.get_task_monitor().wait_for_success(task=affinity_task)
-            if result.get('status') == 'success':
-                self.logger.debug("Successfully created affinity rule {}".format(rule_name))
-                return True
-            else:
-                raise vimconn.vimconnUnexpectedResponse(
-                      "failed to create affinity rule {}".format(rule_name))
-
-
-    def get_add_rule_reference (self, respool_href, headers):
-        """ This method finds href to add vm to host affinity rule to vCD
-
-        Args:
-            respool_href- href to resource pool
-            headers- header information to make REST call
-
-        Returns:
-            None - if no valid href to add rule found or
-            addrule_href - href to add vm to host affinity rule of resource pool
-        """
-        addrule_href = None
-        resp = self.perform_request(req_type='GET',url=respool_href, headers=headers)
-
-        if resp.status_code != requests.codes.ok:
-            # Non-200: keep addrule_href as None so the caller can detect the failure
-            self.logger.debug ("REST API call {} failed. Return status code {}".format(respool_href, resp.status_code))
-        else:
-
-            # Walk the resource-pool set looking for the Link element that both
-            # advertises the vmwVmHostAffinityRule media type and has rel="add".
-            resp_xml = XmlElementTree.fromstring(resp.content)
-            for child in resp_xml:
-                if 'VMWProviderVdcResourcePool' in child.tag:
-                    for schild in child:
-                        if 'Link' in schild.tag:
-                            if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwVmHostAffinityRule+xml" and \
-                                schild.attrib.get('rel') == "add":
-                                addrule_href = schild.attrib.get('href')
-                                # NOTE(review): break only exits the inner Link loop;
-                                # the outer resource-pool loop keeps iterating.
-                                break
-
-        return addrule_href
-
-
-    def add_vm_to_vmgroup(self, vm_uuid, vmGroupNameURL, vmGroup_name, headers):
-        """ Method to add deployed VM to newly created VM Group.
-            This is required to create VM to Host affinity in vCD
-
-        Args:
-            vm_uuid- newly created vm uuid
-            vmGroupNameURL- URL to VM Group name
-            vmGroup_name- Name of VM group created
-            headers- Headers for REST request
-
-        Returns:
-            True- if VM added to VM group successfully
-            False- if any error encounter
-        """
-
-        # Step 1: GET the VM group to discover the "addVms" action link.
-        addvm_resp = self.perform_request(req_type='GET',url=vmGroupNameURL, headers=headers)#, data=payload)
-
-        if addvm_resp.status_code != requests.codes.ok:
-            self.logger.debug ("REST API call to get VM Group Name url {} failed. Return status code {}"\
-                               .format(vmGroupNameURL, addvm_resp.status_code))
-            return False
-        else:
-            resp_xml = XmlElementTree.fromstring(addvm_resp.content)
-            for child in resp_xml:
-                if child.tag.split('}')[1] == 'Link':
-                    if child.attrib.get("rel") == "addVms":
-                        # NOTE(review): if no Link with rel="addVms" exists, addvmtogrpURL
-                        # is never assigned and the POST below raises UnboundLocalError.
-                        addvmtogrpURL =  child.attrib.get("href")
-
-        #Get vm details
-        url_list = [self.url, '/api/vApp/vm-',vm_uuid]
-        vmdetailsURL = ''.join(url_list)
-
-        # Step 2: GET the VM itself to obtain its href/id/name for the reference payload.
-        resp = self.perform_request(req_type='GET',url=vmdetailsURL, headers=headers)
-
-        if resp.status_code != requests.codes.ok:
-            self.logger.debug ("REST API call {} failed. Return status code {}".format(vmdetailsURL, resp.status_code))
-            return False
-
-        #Parse VM details
-        resp_xml = XmlElementTree.fromstring(resp.content)
-        if resp_xml.tag.split('}')[1] == "Vm":
-            # NOTE(review): vm_id/vm_name/vm_href are only bound when the root tag is
-            # "Vm"; otherwise the payload format below would fail — TODO confirm the
-            # endpoint always returns a Vm root element.
-            vm_id = resp_xml.attrib.get("id")
-            vm_name = resp_xml.attrib.get("name")
-            vm_href = resp_xml.attrib.get("href")
-            #print vm_id, vm_name, vm_href
-        #Add VM into VMgroup
-        # Step 3: POST a VmReference to the addVms link; vCD answers 202 Accepted.
-        payload = """<?xml version="1.0" encoding="UTF-8"?>\
-                   <ns2:Vms xmlns:ns2="http://www.vmware.com/vcloud/v1.5" \
-                    xmlns="http://www.vmware.com/vcloud/versions" \
-                    xmlns:ns3="http://schemas.dmtf.org/ovf/envelope/1" \
-                    xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" \
-                    xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/common" \
-                    xmlns:ns6="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" \
-                    xmlns:ns7="http://www.vmware.com/schema/ovf" \
-                    xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" \
-                    xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">\
-                    <ns2:VmReference href="{}" id="{}" name="{}" \
-                    type="application/vnd.vmware.vcloud.vm+xml" />\
-                   </ns2:Vms>""".format(vm_href, vm_id, vm_name)
-
-        addvmtogrp_resp = self.perform_request(req_type='POST',url=addvmtogrpURL, headers=headers, data=payload)
-
-        if addvmtogrp_resp.status_code != requests.codes.accepted:
-            self.logger.debug ("REST API call {} failed. Return status code {}".format(addvmtogrpURL, addvmtogrp_resp.status_code))
-            return False
-        else:
-            self.logger.debug ("Done adding VM {} to VMgroup {}".format(vm_name, vmGroup_name))
-            return True
-
-
-    def create_vmgroup(self, vmgroup_name, vmgroup_href, headers):
-        """Method to create a VM group in vCD
-
-           Args:
-              vmgroup_name : Name of VM group to be created
-              vmgroup_href : href for vmgroup
-              headers- Headers for REST request
-
-           Returns:
-              False if the POST is not accepted; True once the creation task
-              finishes with status 'success'
-
-           Raises:
-              vimconn.vimconnUnexpectedResponse - if no task is found in the
-              response or the task does not complete successfully
-        """
-        #POST to add URL with required data
-        vmgroup_status = False
-        # Minimal VMWVmGroup payload: only name and an initial vmCount of 1.
-        payload = """<VMWVmGroup xmlns="http://www.vmware.com/vcloud/extension/v1.5" \
-                       xmlns:vcloud_v1.5="http://www.vmware.com/vcloud/v1.5" name="{}">\
-                   <vmCount>1</vmCount>\
-                   </VMWVmGroup>""".format(vmgroup_name)
-        resp = self.perform_request(req_type='POST',url=vmgroup_href, headers=headers, data=payload)
-
-        # Creation is asynchronous: expect 202 Accepted plus an embedded task.
-        if resp.status_code != requests.codes.accepted:
-            self.logger.debug ("REST API call {} failed. Return status code {}".format(vmgroup_href, resp.status_code))
-            return vmgroup_status
-        else:
-            vmgroup_task = self.get_task_from_response(resp.content)
-            if vmgroup_task is None or vmgroup_task is False:
-                raise vimconn.vimconnUnexpectedResponse(
-                    "create_vmgroup(): failed to create VM group {}".format(vmgroup_name))
-
-            # wait for task to complete
-            result = self.client.get_task_monitor().wait_for_success(task=vmgroup_task)
-
-            if result.get('status') == 'success':
-                self.logger.debug("create_vmgroup(): Successfully created VM group {}".format(vmgroup_name))
-                #time.sleep(10)
-                vmgroup_status = True
-                return vmgroup_status
-            else:
-                raise vimconn.vimconnUnexpectedResponse(\
-                        "create_vmgroup(): failed to create VM group {}".format(vmgroup_name))
-
-
-    def find_vmgroup_reference(self, url, headers):
-        """ Method to find the href needed to create a new VMGroup, which is
-            required before a created VM can be added to a group.
-
-            Args:
-               url- resource pool href
-               headers- header information
-
-            Returns:
-               href to the VM group "add" link, or None (implicitly) if the GET
-               fails or no matching Link element is found
-        """
-        #Perform GET on resource pool to find 'add' link to create VMGroup
-        #https://vcd-ip/api/admin/extension/providervdc/<providervdc id>/resourcePools
-        vmgrp_href = None
-        resp = self.perform_request(req_type='GET',url=url, headers=headers)
-
-        if resp.status_code != requests.codes.ok:
-            self.logger.debug ("REST API call {} failed. Return status code {}".format(url, resp.status_code))
-        else:
-            #Get the href to add vmGroup to vCD
-            resp_xml = XmlElementTree.fromstring(resp.content)
-            for child in resp_xml:
-                if 'VMWProviderVdcResourcePool' in child.tag:
-                    for schild in child:
-                        if 'Link' in schild.tag:
-                            #Find href with type VMGroup and rel with add
-                            if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwVmGroupType+xml"\
-                                and schild.attrib.get('rel') == "add":
-                                vmgrp_href = schild.attrib.get('href')
-                                # Return on first match; falls off the end (None) otherwise.
-                                return vmgrp_href
-
-
-    def check_availibility_zone(self, az, respool_href, headers):
-        """ Method to verify requested availability zone is present or not in provided
-            resource pool
-
-            Args:
-                az - name of hostgroup (availability zone)
-                respool_href - Resource Pool href
-                headers - Headers to make REST call
-            Returns:
-                az_found - True if availability zone is found else False
-
-            Note: mutates the passed-in headers dict by forcing the Accept
-            header to API version 27.0 (required for the hostGroups media type).
-        """
-        az_found = False
-        headers['Accept']='application/*+xml;version=27.0'
-        resp = self.perform_request(req_type='GET',url=respool_href, headers=headers)
-
-        if resp.status_code != requests.codes.ok:
-            self.logger.debug ("REST API call {} failed. Return status code {}".format(respool_href, resp.status_code))
-        else:
-        #Get the href to hostGroups and find provided hostGroup is present in it
-            resp_xml = XmlElementTree.fromstring(resp.content)
-
-            for child in resp_xml:
-                if 'VMWProviderVdcResourcePool' in child.tag:
-                    for schild in child:
-                        if 'Link' in schild.tag:
-                            if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwHostGroupsType+xml":
-                                # Follow the hostGroups link and scan its HostGroup
-                                # entries for a name matching the requested az.
-                                hostGroup_href = schild.attrib.get('href')
-                                hg_resp = self.perform_request(req_type='GET',url=hostGroup_href, headers=headers)
-                                if hg_resp.status_code != requests.codes.ok:
-                                    self.logger.debug ("REST API call {} failed. Return status code {}".format(hostGroup_href, hg_resp.status_code))
-                                else:
-                                    hg_resp_xml = XmlElementTree.fromstring(hg_resp.content)
-                                    for hostGroup in hg_resp_xml:
-                                        if 'HostGroup' in hostGroup.tag:
-                                            if hostGroup.attrib.get("name") == az:
-                                                az_found = True
-                                                # break only exits the innermost loop;
-                                                # outer loops continue, result unchanged.
-                                                break
-        return az_found
-
-
-    def get_pvdc_for_org(self, org_vdc, headers):
-        """ This method gets provider vdc references from organisation
-
-            Args:
-               org_vdc - name of the organisation VDC to find pvdc
-               headers - headers to make REST call
-
-            Returns:
-               None - if no pvdc href found else
-               pvdc_href - href to pvdc
-
-            Raises:
-               vimconn.vimconnException - if a follow-up GET on a pvdc or its
-               vdcReferences link does not return 200
-        """
-
-        #Get provider VDC references from vCD
-        pvdc_href = None
-        #url = '<vcd url>/api/admin/extension/providerVdcReferences'
-        url_list = [self.url, '/api/admin/extension/providerVdcReferences']
-        url = ''.join(url_list)
-
-        response = self.perform_request(req_type='GET',url=url, headers=headers)
-        if response.status_code != requests.codes.ok:
-            self.logger.debug ("REST API call {} failed. Return status code {}"\
-                               .format(url, response.status_code))
-        else:
-            # For each provider VDC, follow its vdcReferences link and check
-            # whether the requested org VDC is listed under it.
-            xmlroot_response = XmlElementTree.fromstring(response.content)
-            for child in xmlroot_response:
-                if 'ProviderVdcReference' in child.tag:
-                    pvdc_href = child.attrib.get('href')
-                    #Get vdcReferences to find org
-                    pvdc_resp = self.perform_request(req_type='GET',url=pvdc_href, headers=headers)
-                    if pvdc_resp.status_code != requests.codes.ok:
-                        raise vimconn.vimconnException("REST API call {} failed. "\
-                                                       "Return status code {}"\
-                                                       .format(url, pvdc_resp.status_code))
-
-                    # NOTE(review): the inner loops rebind the name "child",
-                    # shadowing the outer loop variable.
-                    pvdc_resp_xml = XmlElementTree.fromstring(pvdc_resp.content)
-                    for child in pvdc_resp_xml:
-                        if 'Link' in child.tag:
-                            if child.attrib.get('type') == "application/vnd.vmware.admin.vdcReferences+xml":
-                                vdc_href = child.attrib.get('href')
-
-                                #Check if provided org is present in vdc
-                                vdc_resp = self.perform_request(req_type='GET',
-                                                                url=vdc_href,
-                                                                headers=headers)
-                                if vdc_resp.status_code != requests.codes.ok:
-                                    raise vimconn.vimconnException("REST API call {} failed. "\
-                                                                   "Return status code {}"\
-                                                                   .format(url, vdc_resp.status_code))
-                                vdc_resp_xml = XmlElementTree.fromstring(vdc_resp.content)
-                                for child in vdc_resp_xml:
-                                    if 'VdcReference' in child.tag:
-                                        if child.attrib.get('name') == org_vdc:
-                                            # Found the org VDC: this pvdc owns it.
-                                            return pvdc_href
-
-
-    def get_resource_pool_details(self, pvdc_href, headers):
-        """ Method to get resource pool information.
-            Host groups are property of resource group.
-            To get host groups, we need to GET details of resource pool.
-
-            Args:
-                pvdc_href: href to pvdc details
-                headers: headers
-
-            Returns:
-                respool_href - href link reference to resource pool, or None if
-                the GET fails or no resourcePoolSet link is present
-        """
-        respool_href = None
-        resp = self.perform_request(req_type='GET',url=pvdc_href, headers=headers)
-
-        if resp.status_code != requests.codes.ok:
-            self.logger.debug ("REST API call {} failed. Return status code {}"\
-                               .format(pvdc_href, resp.status_code))
-        else:
-            # Look for the Link element advertising the resource-pool-set media type.
-            respool_resp_xml = XmlElementTree.fromstring(resp.content)
-            for child in respool_resp_xml:
-                if 'Link' in child.tag:
-                    if child.attrib.get('type') == "application/vnd.vmware.admin.vmwProviderVdcResourcePoolSet+xml":
-                        respool_href = child.attrib.get("href")
-                        break
-        return respool_href
-
-
-    def log_message(self, msg):
-        """
-            Method to log error messages related to Affinity rule creation
-            in new_vminstance & raise Exception
-                Args :
-                    msg - Error message to be logged
-
-            Note: despite the name, this method never returns normally — it
-            always raises vimconn.vimconnException(msg) after logging.
-        """
-        #get token to connect vCD as a normal user
-        # Refresh the session token before bailing out, presumably so the
-        # connector is left in a usable state for the caller — TODO confirm.
-        self.get_token()
-        self.logger.debug(msg)
-        raise vimconn.vimconnException(msg)
-
-
-    ##
-    ##
-    ##  based on current discussion
-    ##
-    ##
-    ##  server:
-    #   created: '2016-09-08T11:51:58'
-    #   description: simple-instance.linux1.1
-    #   flavor: ddc6776e-75a9-11e6-ad5f-0800273e724c
-    #   hostId: e836c036-74e7-11e6-b249-0800273e724c
-    #   image: dde30fe6-75a9-11e6-ad5f-0800273e724c
-    #   status: ACTIVE
-    #   error_msg:
-    #   interfaces: …
-    #
-    def get_vminstance(self, vim_vm_uuid=None):
-        """Returns the VM instance information from VIM
-
-        Args:
-            vim_vm_uuid: UUID of the vApp/VM to look up
-
-        Returns:
-            dict with keys: created, description, status (MANO format),
-            hostId, error_msg, vim_info (yaml dump), interfaces
-
-        Raises:
-            vimconn.vimconnConnectionException - if the tenant VDC cannot be resolved
-            vimconn.vimconnNotFoundException - if no vApp matches the UUID
-        """
-
-        self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
-
-        org, vdc = self.get_vdc_details()
-        if vdc is None:
-            raise vimconn.vimconnConnectionException(
-                "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
-
-        vm_info_dict = self.get_vapp_details_rest(vapp_uuid=vim_vm_uuid)
-        if not vm_info_dict:
-            self.logger.debug("get_vminstance(): Failed to get vApp name by UUID {}".format(vim_vm_uuid))
-            raise vimconn.vimconnNotFoundException("Failed to get vApp name by UUID {}".format(vim_vm_uuid))
-
-        status_key = vm_info_dict['status']
-        error = ''
-        try:
-            # Map the vCD numeric status code to the MANO status string.
-            vm_dict = {'created': vm_info_dict['created'],
-                       'description': vm_info_dict['name'],
-                       'status': vcdStatusCode2manoFormat[int(status_key)],
-                       'hostId': vm_info_dict['vmuuid'],
-                       'error_msg': error,
-                       'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
-
-            if 'interfaces' in vm_info_dict:
-                vm_dict['interfaces'] = vm_info_dict['interfaces']
-            else:
-                vm_dict['interfaces'] = []
-        except KeyError:
-            # Any missing key in vm_info_dict (or unknown status code) is treated
-            # as an inconsistent VIM state rather than an error to the caller.
-            vm_dict = {'created': '',
-                       'description': '',
-                       'status': vcdStatusCode2manoFormat[int(-1)],
-                       'hostId': vm_info_dict['vmuuid'],
-                       'error_msg': "Inconsistency state",
-                       'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
-
-        return vm_dict
-
-    def delete_vminstance(self, vm__vim_uuid, created_items=None):
-        """Method poweroff and remove VM instance from vcloud director network.
-
-        Sequence: power off the vApp, undeploy it, delete it, then remove any
-        config-drive catalog that was created alongside it.
-
-        Args:
-            vm__vim_uuid: VM UUID
-            created_items: unused here; kept for interface compatibility
-
-        Returns:
-            Returns the instance identifier on success; (-1, message) when the
-            vApp cannot be looked up mid-operation
-
-        Raises:
-            vimconn.vimconnException - on VDC lookup failure or any unexpected
-            error during deletion
-        """
-
-        self.logger.debug("Client requesting delete vm instance {} ".format(vm__vim_uuid))
-
-        org, vdc = self.get_vdc_details()
-        vdc_obj = VDC(self.client, href=vdc.get('href'))
-        if vdc_obj is None:
-            self.logger.debug("delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
-                self.tenant_name))
-            raise vimconn.vimconnException(
-                "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
-
-        try:
-            vapp_name = self.get_namebyvappid(vm__vim_uuid)
-            if vapp_name is None:
-                self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
-                return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
-            self.logger.info("Deleting vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
-            vapp_resource = vdc_obj.get_vapp(vapp_name)
-            vapp = VApp(self.client, resource=vapp_resource)
-
-            # Delete vApp and wait for status change if task executed and vApp is None.
-
-            if vapp:
-                if vapp_resource.get('deployed') == 'true':
-                    self.logger.info("Powering off vApp {}".format(vapp_name))
-                    #Power off vApp
-                    # Retry power-off up to MAX_WAIT_TIME seconds, polling every INTERVAL_TIME.
-                    powered_off = False
-                    wait_time = 0
-                    while wait_time <= MAX_WAIT_TIME:
-                        power_off_task = vapp.power_off()
-                        result = self.client.get_task_monitor().wait_for_success(task=power_off_task)
-
-                        if result.get('status') == 'success':
-                            powered_off = True
-                            break
-                        else:
-                            self.logger.info("Wait for vApp {} to power off".format(vapp_name))
-                            time.sleep(INTERVAL_TIME)
-
-                        wait_time +=INTERVAL_TIME
-                    if not powered_off:
-                        # Best-effort: proceed to undeploy even if power-off never succeeded.
-                        self.logger.debug("delete_vminstance(): Failed to power off VM instance {} ".format(vm__vim_uuid))
-                    else:
-                        self.logger.info("delete_vminstance(): Powered off VM instance {} ".format(vm__vim_uuid))
-
-                    #Undeploy vApp
-                    self.logger.info("Undeploy vApp {}".format(vapp_name))
-                    wait_time = 0
-                    undeployed = False
-                    while wait_time <= MAX_WAIT_TIME:
-                        # Re-fetch the vApp each attempt; it may disappear mid-loop.
-                        vapp = VApp(self.client, resource=vapp_resource)
-                        if not vapp:
-                            self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
-                            return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
-                        undeploy_task = vapp.undeploy()
-
-                        result = self.client.get_task_monitor().wait_for_success(task=undeploy_task)
-                        if result.get('status') == 'success':
-                            undeployed = True
-                            break
-                        else:
-                            self.logger.debug("Wait for vApp {} to undeploy".format(vapp_name))
-                            time.sleep(INTERVAL_TIME)
-
-                        wait_time +=INTERVAL_TIME
-
-                    if not undeployed:
-                        self.logger.debug("delete_vminstance(): Failed to undeploy vApp {} ".format(vm__vim_uuid))
-
-                # delete vapp
-                self.logger.info("Start deletion of vApp {} ".format(vapp_name))
-
-                if vapp is not None:
-                    wait_time = 0
-                    result = False
-
-                    while wait_time <= MAX_WAIT_TIME:
-                        vapp = VApp(self.client, resource=vapp_resource)
-                        if not vapp:
-                            self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
-                            return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
-
-                        delete_task = vdc_obj.delete_vapp(vapp.name, force=True)
-
-                        result = self.client.get_task_monitor().wait_for_success(task=delete_task)
-                        if result.get('status') == 'success':
-                            break
-                        else:
-                            self.logger.debug("Wait for vApp {} to delete".format(vapp_name))
-                            time.sleep(INTERVAL_TIME)
-
-                        wait_time +=INTERVAL_TIME
-
-                    # NOTE(review): result is initialized to False and reassigned by
-                    # wait_for_success, so this None check looks unreachable — the
-                    # intent was probably to test the loop's success status instead.
-                    if result is None:
-                        self.logger.debug("delete_vminstance(): Failed delete uuid {} ".format(vm__vim_uuid))
-                    else:
-                        self.logger.info("Deleted vm instance {} sccessfully".format(vm__vim_uuid))
-                        # Also clean up the per-VM config-drive catalog ("cfg_drv-<uuid>") if one exists.
-                        config_drive_catalog_name, config_drive_catalog_id = 'cfg_drv-' + vm__vim_uuid, None
-                        catalog_list = self.get_image_list()
-                        try:
-                            config_drive_catalog_id = [catalog_['id'] for catalog_ in catalog_list
-                                                       if catalog_['name'] == config_drive_catalog_name][0]
-                        except IndexError:
-                            pass
-                        if config_drive_catalog_id:
-                            self.logger.debug('delete_vminstance(): Found a config drive catalog {} matching '
-                                              'vapp_name"{}". Deleting it.'.format(config_drive_catalog_id, vapp_name))
-                            self.delete_image(config_drive_catalog_id)
-                        return vm__vim_uuid
-        except:
-            # NOTE(review): bare except swallows everything (incl. KeyboardInterrupt)
-            # and re-raises as a generic vimconnException — narrow this on rewrite.
-            self.logger.debug(traceback.format_exc())
-            raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
-
-
-    def refresh_vms_status(self, vm_list):
-        """Get the status of the virtual machines and their interfaces/ports
-           Params: the list of VM identifiers
-           Returns a dictionary with:
-                vm_id:          #VIM id of this Virtual Machine
-                    status:     #Mandatory. Text with one of:
-                                #  DELETED (not found at vim)
-                                #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
-                                #  OTHER (Vim reported other status not understood)
-                                #  ERROR (VIM indicates an ERROR status)
-                                #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
-                                #  CREATING (on building process), ERROR
-                                #  ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
-                                #
-                    error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
-                    vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
-                    interfaces:
-                     -  vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
-                        mac_address:      #Text format XX:XX:XX:XX:XX:XX
-                        vim_net_id:       #network id where this interface is connected
-                        vim_interface_id: #interface/port VIM id
-                        ip_address:       #null, or text with IPv4, IPv6 address
-        """
-
-        self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))
-
-        org,vdc = self.get_vdc_details()
-        if vdc is None:
-            raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
-
-        vms_dict = {}
-        # NSX edge list is fetched lazily, at most once, only when a VM has no
-        # IP address in its vCD network-connection section.
-        nsx_edge_list = []
-        for vmuuid in vm_list:
-            vapp_name = self.get_namebyvappid(vmuuid)
-            if vapp_name is not None:
-
-                try:
-                    vm_pci_details = self.get_vm_pci_details(vmuuid)
-                    vdc_obj = VDC(self.client, href=vdc.get('href'))
-                    vapp_resource = vdc_obj.get_vapp(vapp_name)
-                    the_vapp = VApp(self.client, resource=vapp_resource)
-
-                    vm_details = {}
-                    for vm in the_vapp.get_all_vms():
-                        # Fetch the raw VM XML directly; the session token is reused
-                        # from the pyvcloud client.
-                        headers = {'Accept':'application/*+xml;version=' + API_VERSION,
-                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
-                        response = self.perform_request(req_type='GET',
-                                                        url=vm.get('href'),
-                                                        headers=headers)
-
-                        if response.status_code != 200:
-                            self.logger.error("refresh_vms_status : REST call {} failed reason : {}"\
-                                                            "status code : {}".format(vm.get('href'),
-                                                                                    response.content,
-                                                                               response.status_code))
-                            raise vimconn.vimconnException("refresh_vms_status : Failed to get "\
-                                                                         "VM details")
-                        xmlroot = XmlElementTree.fromstring(response.content)
-
-
-                        # The hardware section is scraped with regexes over the flattened
-                        # XML text rather than parsed structurally.
-                        result = response.content.replace("\n"," ")
-                        hdd_match = re.search('vcloud:capacity="(\d+)"\svcloud:storageProfileOverrideVmDefault=',result)
-                        if hdd_match:
-                            hdd_mb = hdd_match.group(1)
-                            vm_details['hdd_mb'] = int(hdd_mb) if hdd_mb else None
-                        cpus_match = re.search('<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>',result)
-                        if cpus_match:
-                            cpus = cpus_match.group(1)
-                            vm_details['cpus'] = int(cpus) if cpus else None
-                        # NOTE(review): unlike hdd/cpus, the memory regex result is not
-                        # None-checked — .group(1) raises AttributeError if it misses.
-                        memory_mb = re.search('<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
-                        vm_details['memory_mb'] = int(memory_mb) if memory_mb else None
-                        vm_details['status'] = vcdStatusCode2manoFormat[int(xmlroot.get('status'))]
-                        vm_details['id'] = xmlroot.get('id')
-                        vm_details['name'] = xmlroot.get('name')
-                        vm_info = [vm_details]
-                        if vm_pci_details:
-                            vm_info[0].update(vm_pci_details)
-
-                        vm_dict = {'status': vcdStatusCode2manoFormat[int(vapp_resource.get('status'))],
-                                   'error_msg': vcdStatusCode2manoFormat[int(vapp_resource.get('status'))],
-                                   'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}
-
-                        # get networks
-                        vm_ip = None
-                        vm_mac = None
-                        networks = re.findall('<NetworkConnection needsCustomization=.*?</NetworkConnection>',result)
-                        for network in networks:
-                            mac_s = re.search('<MACAddress>(.*?)</MACAddress>',network)
-                            vm_mac = mac_s.group(1) if mac_s else None
-                            ip_s = re.search('<IpAddress>(.*?)</IpAddress>',network)
-                            vm_ip = ip_s.group(1) if ip_s else None
-
-                            if vm_ip is None:
-                                # Fall back to the NSX edge DHCP leases when vCD does
-                                # not report an IP for this interface.
-                                if not nsx_edge_list:
-                                    nsx_edge_list = self.get_edge_details()
-                                    if nsx_edge_list is None:
-                                        raise vimconn.vimconnException("refresh_vms_status:"\
-                                                                       "Failed to get edge details from NSX Manager")
-                                if vm_mac is not None:
-                                    vm_ip = self.get_ipaddr_from_NSXedge(nsx_edge_list, vm_mac)
-
-                            net_s = re.search('network="(.*?)"',network)
-                            network_name = net_s.group(1) if net_s else None
-
-                            vm_net_id = self.get_network_id_by_name(network_name)
-                            interface = {"mac_address": vm_mac,
-                                         "vim_net_id": vm_net_id,
-                                         "vim_interface_id": vm_net_id,
-                                         "ip_address": vm_ip}
-
-                            vm_dict["interfaces"].append(interface)
-
-                    # add a vm to vm dict
-                    vms_dict.setdefault(vmuuid, vm_dict)
-                    self.logger.debug("refresh_vms_status : vm info {}".format(vm_dict))
-                except Exception as exp:
-                    # Per-VM errors are logged and skipped so one bad VM does not
-                    # abort the whole refresh.
-                    self.logger.debug("Error in response {}".format(exp))
-                    self.logger.debug(traceback.format_exc())
-
-        return vms_dict
-
-
-    def get_edge_details(self):
-        """Get the NSX edge list from NSX Manager
-           Returns list of NSX edges
-        """
-        edge_list = []
-        rheaders = {'Content-Type': 'application/xml'}
-        nsx_api_url = '/api/4.0/edges'
-
-        self.logger.debug("Get edge details from NSX Manager {} {}".format(self.nsx_manager, nsx_api_url))
-
-        try:
-            resp = requests.get(self.nsx_manager + nsx_api_url,
-                                auth = (self.nsx_user, self.nsx_password),
-                                verify = False, headers = rheaders)
-            if resp.status_code == requests.codes.ok:
-                paged_Edge_List = XmlElementTree.fromstring(resp.text)
-                for edge_pages in paged_Edge_List:
-                    if edge_pages.tag == 'edgePage':
-                        for edge_summary in edge_pages:
-                            if edge_summary.tag == 'pagingInfo':
-                                for element in edge_summary:
-                                    if element.tag == 'totalCount' and element.text == '0':
-                                        raise vimconn.vimconnException("get_edge_details: No NSX edges details found: {}"
-                                                                       .format(self.nsx_manager))
-
-                            if edge_summary.tag == 'edgeSummary':
-                                for element in edge_summary:
-                                    if element.tag == 'id':
-                                        edge_list.append(element.text)
-                    else:
-                        raise vimconn.vimconnException("get_edge_details: No NSX edge details found: {}"
-                                                       .format(self.nsx_manager))
-
-                if not edge_list:
-                    raise vimconn.vimconnException("get_edge_details: "\
-                                                   "No NSX edge details found: {}"
-                                                   .format(self.nsx_manager))
-                else:
-                    self.logger.debug("get_edge_details: Found NSX edges {}".format(edge_list))
-                    return edge_list
-            else:
-                self.logger.debug("get_edge_details: "
-                                  "Failed to get NSX edge details from NSX Manager: {}"
-                                  .format(resp.content))
-                return None
-
-        except Exception as exp:
-            self.logger.debug("get_edge_details: "\
-                              "Failed to get NSX edge details from NSX Manager: {}"
-                              .format(exp))
-            raise vimconn.vimconnException("get_edge_details: "\
-                                           "Failed to get NSX edge details from NSX Manager: {}"
-                                           .format(exp))
-
-
-    def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
-        """Get IP address details from NSX edges, using the MAC address
-           PARAMS: nsx_edges : List of NSX edges
-                   mac_address : Find IP address corresponding to this MAC address
-           Returns: IP address corrresponding to the provided MAC address
-        """
-
-        ip_addr = None
-        rheaders = {'Content-Type': 'application/xml'}
-
-        self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
-
-        try:
-            for edge in nsx_edges:
-                nsx_api_url = '/api/4.0/edges/'+ edge +'/dhcp/leaseInfo'
-
-                resp = requests.get(self.nsx_manager + nsx_api_url,
-                                    auth = (self.nsx_user, self.nsx_password),
-                                    verify = False, headers = rheaders)
-
-                if resp.status_code == requests.codes.ok:
-                    dhcp_leases = XmlElementTree.fromstring(resp.text)
-                    for child in dhcp_leases:
-                        if child.tag == 'dhcpLeaseInfo':
-                            dhcpLeaseInfo = child
-                            for leaseInfo in dhcpLeaseInfo:
-                                for elem in leaseInfo:
-                                    if (elem.tag)=='macAddress':
-                                        edge_mac_addr = elem.text
-                                    if (elem.tag)=='ipAddress':
-                                        ip_addr = elem.text
-                                if edge_mac_addr is not None:
-                                    if edge_mac_addr == mac_address:
-                                        self.logger.debug("Found ip addr {} for mac {} at NSX edge {}"
-                                                          .format(ip_addr, mac_address,edge))
-                                        return ip_addr
-                else:
-                    self.logger.debug("get_ipaddr_from_NSXedge: "\
-                                      "Error occurred while getting DHCP lease info from NSX Manager: {}"
-                                      .format(resp.content))
-
-            self.logger.debug("get_ipaddr_from_NSXedge: No IP addr found in any NSX edge")
-            return None
-
-        except XmlElementTree.ParseError as Err:
-            self.logger.debug("ParseError in response from NSX Manager {}".format(Err.message), exc_info=True)
-
-
-    def action_vminstance(self, vm__vim_uuid=None, action_dict=None, created_items={}):
-        """Send and action over a VM instance from VIM
-        Returns the vm_id if the action was successfully sent to the VIM"""
-
-        self.logger.debug("Received action for vm {} and action dict {}".format(vm__vim_uuid, action_dict))
-        if vm__vim_uuid is None or action_dict is None:
-            raise vimconn.vimconnException("Invalid request. VM id or action is None.")
-
-        org, vdc = self.get_vdc_details()
-        if vdc is None:
-            raise  vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
-
-        vapp_name = self.get_namebyvappid(vm__vim_uuid)
-        if vapp_name is None:
-            self.logger.debug("action_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
-            raise vimconn.vimconnException("Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
-        else:
-            self.logger.info("Action_vminstance vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
-
-        try:
-            vdc_obj = VDC(self.client, href=vdc.get('href'))
-            vapp_resource = vdc_obj.get_vapp(vapp_name)
-            vapp = VApp(self.client, resource=vapp_resource)
-            if "start" in action_dict:
-                self.logger.info("action_vminstance: Power on vApp: {}".format(vapp_name))
-                poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)
-                result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
-                self.instance_actions_result("start", result, vapp_name)
-            elif "rebuild" in action_dict:
-                self.logger.info("action_vminstance: Rebuild vApp: {}".format(vapp_name))
-                rebuild_task = vapp.deploy(power_on=True)
-                result = self.client.get_task_monitor().wait_for_success(task=rebuild_task)
-                self.instance_actions_result("rebuild", result, vapp_name)
-            elif "pause" in action_dict:
-                self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
-                pause_task = vapp.undeploy(action='suspend')
-                result = self.client.get_task_monitor().wait_for_success(task=pause_task)
-                self.instance_actions_result("pause", result, vapp_name)
-            elif "resume" in action_dict:
-                self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
-                poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)
-                result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
-                self.instance_actions_result("resume", result, vapp_name)
-            elif "shutoff" in action_dict or "shutdown" in action_dict:
-                action_name , value = action_dict.items()[0]
-                #For python3
-                #action_name , value = list(action_dict.items())[0]
-                self.logger.info("action_vminstance: {} vApp: {}".format(action_name, vapp_name))
-                shutdown_task = vapp.shutdown()
-                result = self.client.get_task_monitor().wait_for_success(task=shutdown_task)
-                if action_name == "shutdown":
-                    self.instance_actions_result("shutdown", result, vapp_name)
-                else:
-                    self.instance_actions_result("shutoff", result, vapp_name)
-            elif "forceOff" in action_dict:
-                result = vapp.undeploy(action='powerOff')
-                self.instance_actions_result("forceOff", result, vapp_name)
-            elif "reboot" in action_dict:
-                self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
-                reboot_task = vapp.reboot()
-                self.client.get_task_monitor().wait_for_success(task=reboot_task)
-            else:
-                raise vimconn.vimconnException("action_vminstance: Invalid action {} or action is None.".format(action_dict))
-            return vm__vim_uuid
-        except Exception as exp :
-            self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
-            raise vimconn.vimconnException("action_vminstance: Failed with Exception {}".format(exp))
-
-    def instance_actions_result(self, action, result, vapp_name):
-        if result.get('status') == 'success':
-            self.logger.info("action_vminstance: Sucessfully {} the vApp: {}".format(action, vapp_name))
-        else:
-            self.logger.error("action_vminstance: Failed to {} vApp: {}".format(action, vapp_name))
-
-    def get_vminstance_console(self, vm_id, console_type="novnc"):
-        """
-        Get a console for the virtual machine
-        Params:
-            vm_id: uuid of the VM
-            console_type, can be:
-                "novnc" (by default), "xvpvnc" for VNC types,
-                "rdp-html5" for RDP types, "spice-html5" for SPICE types
-        Returns dict with the console parameters:
-                protocol: ssh, ftp, http, https, ...
-                server:   usually ip address
-                port:     the http, ssh, ... port
-                suffix:   extra text, e.g. the http path and query string
-        """
-        console_dict = {}
-
-        if console_type==None or console_type=='novnc':
-
-            url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireMksTicket".format(self.url, vm_id)
-
-            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
-                       'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
-            response = self.perform_request(req_type='POST',
-                                         url=url_rest_call,
-                                           headers=headers)
-
-            if response.status_code == 403:
-                response = self.retry_rest('GET', url_rest_call)
-
-            if response.status_code != 200:
-                self.logger.error("REST call {} failed reason : {}"\
-                                  "status code : {}".format(url_rest_call,
-                                                         response.content,
-                                                    response.status_code))
-                raise vimconn.vimconnException("get_vminstance_console : Failed to get "\
-                                                                     "VM Mks ticket details")
-            s = re.search("<Host>(.*?)</Host>",response.content)
-            console_dict['server'] = s.group(1) if s else None
-            s1 = re.search("<Port>(\d+)</Port>",response.content)
-            console_dict['port'] = s1.group(1) if s1 else None
-
-
-            url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireTicket".format(self.url, vm_id)
-
-            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
-                       'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
-            response = self.perform_request(req_type='POST',
-                                         url=url_rest_call,
-                                           headers=headers)
-
-            if response.status_code == 403:
-                response = self.retry_rest('GET', url_rest_call)
-
-            if response.status_code != 200:
-                self.logger.error("REST call {} failed reason : {}"\
-                                  "status code : {}".format(url_rest_call,
-                                                         response.content,
-                                                    response.status_code))
-                raise vimconn.vimconnException("get_vminstance_console : Failed to get "\
-                                                                     "VM console details")
-            s = re.search(">.*?/(vm-\d+.*)</",response.content)
-            console_dict['suffix'] = s.group(1) if s else None
-            console_dict['protocol'] = "https"
-
-        return console_dict
-
-    # NOT USED METHODS in current version
-
    def host_vim2gui(self, host, server_dict):
        """Transform host dictionary from VIM format to GUI format,
        and append to the server_dict.

        Not supported by this vCloud Director connector.

        Raises:
            vimconnNotImplemented: always.
        """
        raise vimconn.vimconnNotImplemented("Should have implemented this")
-
    def get_hosts_info(self):
        """Get the information of deployed hosts.

        Not supported by this vCloud Director connector.

        Raises:
            vimconnNotImplemented: always.
        """
        raise vimconn.vimconnNotImplemented("Should have implemented this")
-
    def get_hosts(self, vim_tenant):
        """Get the hosts and deployed instances.

        Not supported by this vCloud Director connector.

        Raises:
            vimconnNotImplemented: always.
        """
        raise vimconn.vimconnNotImplemented("Should have implemented this")
-
    def get_processor_rankings(self):
        """Get the processor rankings in the VIM database.

        Not supported by this vCloud Director connector.

        Raises:
            vimconnNotImplemented: always.
        """
        raise vimconn.vimconnNotImplemented("Should have implemented this")
-
-    def new_host(self, host_data):
-        """Adds a new host to VIM"""
-        '''Returns status code of the VIM response'''
-        raise vimconn.vimconnNotImplemented("Should have implemented this")
-
-    def new_external_port(self, port_data):
-        """Adds a external port to VIM"""
-        '''Returns the port identifier'''
-        raise vimconn.vimconnNotImplemented("Should have implemented this")
-
-    def new_external_network(self, net_name, net_type):
-        """Adds a external network to VIM (shared)"""
-        '''Returns the network identifier'''
-        raise vimconn.vimconnNotImplemented("Should have implemented this")
-
-    def connect_port_network(self, port_id, network_id, admin=False):
-        """Connects a external port to a network"""
-        '''Returns status code of the VIM response'''
-        raise vimconn.vimconnNotImplemented("Should have implemented this")
-
-    def new_vminstancefromJSON(self, vm_data):
-        """Adds a VM instance to VIM"""
-        '''Returns the instance identifier'''
-        raise vimconn.vimconnNotImplemented("Should have implemented this")
-
-    def get_network_name_by_id(self, network_uuid=None):
-        """Method gets vcloud director network named based on supplied uuid.
-
-        Args:
-            network_uuid: network_id
-
-        Returns:
-            The return network name.
-        """
-
-        if not network_uuid:
-            return None
-
-        try:
-            org_dict = self.get_org(self.org_uuid)
-            if 'networks' in org_dict:
-                org_network_dict = org_dict['networks']
-                for net_uuid in org_network_dict:
-                    if net_uuid == network_uuid:
-                        return org_network_dict[net_uuid]
-        except:
-            self.logger.debug("Exception in get_network_name_by_id")
-            self.logger.debug(traceback.format_exc())
-
-        return None
-
-    def get_network_id_by_name(self, network_name=None):
-        """Method gets vcloud director network uuid based on supplied name.
-
-        Args:
-            network_name: network_name
-        Returns:
-            The return network uuid.
-            network_uuid: network_id
-        """
-
-        if not network_name:
-            self.logger.debug("get_network_id_by_name() : Network name is empty")
-            return None
-
-        try:
-            org_dict = self.get_org(self.org_uuid)
-            if org_dict and 'networks' in org_dict:
-                org_network_dict = org_dict['networks']
-                for net_uuid,net_name in org_network_dict.iteritems():
-                #For python3
-                #for net_uuid,net_name in org_network_dict.items():
-                    if net_name == network_name:
-                        return net_uuid
-
-        except KeyError as exp:
-            self.logger.debug("get_network_id_by_name() : KeyError- {} ".format(exp))
-
-        return None
-
-    def list_org_action(self):
-        """
-        Method leverages vCloud director and query for available organization for particular user
-
-        Args:
-            vca - is active VCA connection.
-            vdc_name - is a vdc name that will be used to query vms action
-
-            Returns:
-                The return XML respond
-        """
-        url_list = [self.url, '/api/org']
-        vm_list_rest_call = ''.join(url_list)
-
-        if self.client._session:
-            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
-                       'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
-
-            response = self.perform_request(req_type='GET',
-                                     url=vm_list_rest_call,
-                                           headers=headers)
-
-            if response.status_code == 403:
-                response = self.retry_rest('GET', vm_list_rest_call)
-
-            if response.status_code == requests.codes.ok:
-                return response.content
-
-        return None
-
-    def get_org_action(self, org_uuid=None):
-        """
-        Method leverages vCloud director and retrieve available object for organization.
-
-        Args:
-            org_uuid - vCD organization uuid
-            self.client - is active connection.
-
-            Returns:
-                The return XML respond
-        """
-
-        if org_uuid is None:
-            return None
-
-        url_list = [self.url, '/api/org/', org_uuid]
-        vm_list_rest_call = ''.join(url_list)
-
-        if self.client._session:
-            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
-                     'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
-
-            #response = requests.get(vm_list_rest_call, headers=headers, verify=False)
-            response = self.perform_request(req_type='GET',
-                                            url=vm_list_rest_call,
-                                            headers=headers)
-            if response.status_code == 403:
-                response = self.retry_rest('GET', vm_list_rest_call)
-
-            if response.status_code == requests.codes.ok:
-                return response.content
-        return None
-
-    def get_org(self, org_uuid=None):
-        """
-        Method retrieves available organization in vCloud Director
-
-        Args:
-            org_uuid - is a organization uuid.
-
-            Returns:
-                The return dictionary with following key
-                    "network" - for network list under the org
-                    "catalogs" - for network list under the org
-                    "vdcs" - for vdc list under org
-        """
-
-        org_dict = {}
-
-        if org_uuid is None:
-            return org_dict
-
-        content = self.get_org_action(org_uuid=org_uuid)
-        try:
-            vdc_list = {}
-            network_list = {}
-            catalog_list = {}
-            vm_list_xmlroot = XmlElementTree.fromstring(content)
-            for child in vm_list_xmlroot:
-                if child.attrib['type'] == 'application/vnd.vmware.vcloud.vdc+xml':
-                    vdc_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
-                    org_dict['vdcs'] = vdc_list
-                if child.attrib['type'] == 'application/vnd.vmware.vcloud.orgNetwork+xml':
-                    network_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
-                    org_dict['networks'] = network_list
-                if child.attrib['type'] == 'application/vnd.vmware.vcloud.catalog+xml':
-                    catalog_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
-                    org_dict['catalogs'] = catalog_list
-        except:
-            pass
-
-        return org_dict
-
-    def get_org_list(self):
-        """
-        Method retrieves available organization in vCloud Director
-
-        Args:
-            vca - is active VCA connection.
-
-            Returns:
-                The return dictionary and key for each entry VDC UUID
-        """
-
-        org_dict = {}
-
-        content = self.list_org_action()
-        try:
-            vm_list_xmlroot = XmlElementTree.fromstring(content)
-            for vm_xml in vm_list_xmlroot:
-                if vm_xml.tag.split("}")[1] == 'Org':
-                    org_uuid = vm_xml.attrib['href'].split('/')[-1:]
-                    org_dict[org_uuid[0]] = vm_xml.attrib['name']
-        except:
-            pass
-
-        return org_dict
-
-    def vms_view_action(self, vdc_name=None):
-        """ Method leverages vCloud director vms query call
-
-        Args:
-            vca - is active VCA connection.
-            vdc_name - is a vdc name that will be used to query vms action
-
-            Returns:
-                The return XML respond
-        """
-        vca = self.connect()
-        if vdc_name is None:
-            return None
-
-        url_list = [vca.host, '/api/vms/query']
-        vm_list_rest_call = ''.join(url_list)
-
-        if not (not vca.vcloud_session or not vca.vcloud_session.organization):
-            refs = filter(lambda ref: ref.name == vdc_name and ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml',
-                          vca.vcloud_session.organization.Link)
-            #For python3
-            #refs = [ref for ref in vca.vcloud_session.organization.Link if ref.name == vdc_name and\
-            #        ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml']
-            if len(refs) == 1:
-                response = Http.get(url=vm_list_rest_call,
-                                    headers=vca.vcloud_session.get_vcloud_headers(),
-                                    verify=vca.verify,
-                                    logger=vca.logger)
-                if response.status_code == requests.codes.ok:
-                    return response.content
-
-        return None
-
-    def get_vapp_list(self, vdc_name=None):
-        """
-        Method retrieves vApp list deployed vCloud director and returns a dictionary
-        contains a list of all vapp deployed for queried VDC.
-        The key for a dictionary is vApp UUID
-
-
-        Args:
-            vca - is active VCA connection.
-            vdc_name - is a vdc name that will be used to query vms action
-
-            Returns:
-                The return dictionary and key for each entry vapp UUID
-        """
-
-        vapp_dict = {}
-        if vdc_name is None:
-            return vapp_dict
-
-        content = self.vms_view_action(vdc_name=vdc_name)
-        try:
-            vm_list_xmlroot = XmlElementTree.fromstring(content)
-            for vm_xml in vm_list_xmlroot:
-                if vm_xml.tag.split("}")[1] == 'VMRecord':
-                    if vm_xml.attrib['isVAppTemplate'] == 'true':
-                        rawuuid = vm_xml.attrib['container'].split('/')[-1:]
-                        if 'vappTemplate-' in rawuuid[0]:
-                            # vm in format vappTemplate-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
-                            # vm and use raw UUID as key
-                            vapp_dict[rawuuid[0][13:]] = vm_xml.attrib
-        except:
-            pass
-
-        return vapp_dict
-
-    def get_vm_list(self, vdc_name=None):
-        """
-        Method retrieves VM's list deployed vCloud director. It returns a dictionary
-        contains a list of all VM's deployed for queried VDC.
-        The key for a dictionary is VM UUID
-
-
-        Args:
-            vca - is active VCA connection.
-            vdc_name - is a vdc name that will be used to query vms action
-
-            Returns:
-                The return dictionary and key for each entry vapp UUID
-        """
-        vm_dict = {}
-
-        if vdc_name is None:
-            return vm_dict
-
-        content = self.vms_view_action(vdc_name=vdc_name)
-        try:
-            vm_list_xmlroot = XmlElementTree.fromstring(content)
-            for vm_xml in vm_list_xmlroot:
-                if vm_xml.tag.split("}")[1] == 'VMRecord':
-                    if vm_xml.attrib['isVAppTemplate'] == 'false':
-                        rawuuid = vm_xml.attrib['href'].split('/')[-1:]
-                        if 'vm-' in rawuuid[0]:
-                            # vm in format vm-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
-                            #  vm and use raw UUID as key
-                            vm_dict[rawuuid[0][3:]] = vm_xml.attrib
-        except:
-            pass
-
-        return vm_dict
-
-    def get_vapp(self, vdc_name=None, vapp_name=None, isuuid=False):
-        """
-        Method retrieves VM deployed vCloud director. It returns VM attribute as dictionary
-        contains a list of all VM's deployed for queried VDC.
-        The key for a dictionary is VM UUID
-
-
-        Args:
-            vca - is active VCA connection.
-            vdc_name - is a vdc name that will be used to query vms action
-
-            Returns:
-                The return dictionary and key for each entry vapp UUID
-        """
-        vm_dict = {}
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed")
-
-        if vdc_name is None:
-            return vm_dict
-
-        content = self.vms_view_action(vdc_name=vdc_name)
-        try:
-            vm_list_xmlroot = XmlElementTree.fromstring(content)
-            for vm_xml in vm_list_xmlroot:
-                if vm_xml.tag.split("}")[1] == 'VMRecord' and vm_xml.attrib['isVAppTemplate'] == 'false':
-                    # lookup done by UUID
-                    if isuuid:
-                        if vapp_name in vm_xml.attrib['container']:
-                            rawuuid = vm_xml.attrib['href'].split('/')[-1:]
-                            if 'vm-' in rawuuid[0]:
-                                vm_dict[rawuuid[0][3:]] = vm_xml.attrib
-                                break
-                    # lookup done by Name
-                    else:
-                        if vapp_name in vm_xml.attrib['name']:
-                            rawuuid = vm_xml.attrib['href'].split('/')[-1:]
-                            if 'vm-' in rawuuid[0]:
-                                vm_dict[rawuuid[0][3:]] = vm_xml.attrib
-                                break
-        except:
-            pass
-
-        return vm_dict
-
-    def get_network_action(self, network_uuid=None):
-        """
-        Method leverages vCloud director and query network based on network uuid
-
-        Args:
-            vca - is active VCA connection.
-            network_uuid - is a network uuid
-
-            Returns:
-                The return XML respond
-        """
-
-        if network_uuid is None:
-            return None
-
-        url_list = [self.url, '/api/network/', network_uuid]
-        vm_list_rest_call = ''.join(url_list)
-
-        if self.client._session:
-            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
-                     'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
-
-            response = self.perform_request(req_type='GET',
-                                            url=vm_list_rest_call,
-                                            headers=headers)
-            #Retry login if session expired & retry sending request
-            if response.status_code == 403:
-                response = self.retry_rest('GET', vm_list_rest_call)
-
-            if response.status_code == requests.codes.ok:
-                return response.content
-
-        return None
-
-    def get_vcd_network(self, network_uuid=None):
-        """
-        Method retrieves available network from vCloud Director
-
-        Args:
-            network_uuid - is VCD network UUID
-
-        Each element serialized as key : value pair
-
-        Following keys available for access.    network_configuration['Gateway'}
-        <Configuration>
-          <IpScopes>
-            <IpScope>
-                <IsInherited>true</IsInherited>
-                <Gateway>172.16.252.100</Gateway>
-                <Netmask>255.255.255.0</Netmask>
-                <Dns1>172.16.254.201</Dns1>
-                <Dns2>172.16.254.202</Dns2>
-                <DnsSuffix>vmwarelab.edu</DnsSuffix>
-                <IsEnabled>true</IsEnabled>
-                <IpRanges>
-                    <IpRange>
-                        <StartAddress>172.16.252.1</StartAddress>
-                        <EndAddress>172.16.252.99</EndAddress>
-                    </IpRange>
-                </IpRanges>
-            </IpScope>
-        </IpScopes>
-        <FenceMode>bridged</FenceMode>
-
-        Returns:
-                The return dictionary and key for each entry vapp UUID
-        """
-
-        network_configuration = {}
-        if network_uuid is None:
-            return network_uuid
-
-        try:
-            content = self.get_network_action(network_uuid=network_uuid)
-            vm_list_xmlroot = XmlElementTree.fromstring(content)
-
-            network_configuration['status'] = vm_list_xmlroot.get("status")
-            network_configuration['name'] = vm_list_xmlroot.get("name")
-            network_configuration['uuid'] = vm_list_xmlroot.get("id").split(":")[3]
-
-            for child in vm_list_xmlroot:
-                if child.tag.split("}")[1] == 'IsShared':
-                    network_configuration['isShared'] = child.text.strip()
-                if child.tag.split("}")[1] == 'Configuration':
-                    for configuration in child.iter():
-                        tagKey = configuration.tag.split("}")[1].strip()
-                        if tagKey != "":
-                            network_configuration[tagKey] = configuration.text.strip()
-            return network_configuration
-        except Exception as exp :
-            self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp))
-            raise vimconn.vimconnException("get_vcd_network: Failed with Exception {}".format(exp))
-
-        return network_configuration
-
-    def delete_network_action(self, network_uuid=None):
-        """
-        Method delete given network from vCloud director
-
-        Args:
-            network_uuid - is a network uuid that client wish to delete
-
-            Returns:
-                The return None or XML respond or false
-        """
-        client = self.connect_as_admin()
-        if not client:
-            raise vimconn.vimconnConnectionException("Failed to connect vCD as admin")
-        if network_uuid is None:
-            return False
-
-        url_list = [self.url, '/api/admin/network/', network_uuid]
-        vm_list_rest_call = ''.join(url_list)
-
-        if client._session:
-            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
-                     'x-vcloud-authorization': client._session.headers['x-vcloud-authorization']}
-            response = self.perform_request(req_type='DELETE',
-                                            url=vm_list_rest_call,
-                                            headers=headers)
-            if response.status_code == 202:
-                return True
-
-        return False
-
-    def create_network(self, network_name=None, net_type='bridge', parent_network_uuid=None,
-                       ip_profile=None, isshared='true'):
-        """
-        Method create network in vCloud director
-
-        Args:
-            network_name - is network name to be created.
-            net_type - can be 'bridge','data','ptp','mgmt'.
-            ip_profile is a dict containing the IP parameters of the network
-            isshared - is a boolean
-            parent_network_uuid - is parent provider vdc network that will be used for mapping.
-            It optional attribute. by default if no parent network indicate the first available will be used.
-
-            Returns:
-                The return network uuid or return None
-        """
-
-        new_network_name = [network_name, '-', str(uuid.uuid4())]
-        content = self.create_network_rest(network_name=''.join(new_network_name),
-                                           ip_profile=ip_profile,
-                                           net_type=net_type,
-                                           parent_network_uuid=parent_network_uuid,
-                                           isshared=isshared)
-        if content is None:
-            self.logger.debug("Failed create network {}.".format(network_name))
-            return None
-
-        try:
-            vm_list_xmlroot = XmlElementTree.fromstring(content)
-            vcd_uuid = vm_list_xmlroot.get('id').split(":")
-            if len(vcd_uuid) == 4:
-                self.logger.info("Created new network name: {} uuid: {}".format(network_name, vcd_uuid[3]))
-                return vcd_uuid[3]
-        except:
-            self.logger.debug("Failed create network {}".format(network_name))
-            return None
-
    def create_network_rest(self, network_name=None, net_type='bridge', parent_network_uuid=None,
                            ip_profile=None, isshared='true'):
        """
        Method create network in vCloud director

        Args:
            network_name - is network name to be created.
            net_type - can be 'bridge','data','ptp','mgmt'.
            ip_profile is a dict containing the IP parameters of the network;
                missing entries are filled in from DEFAULT_IP_PROFILE.
            isshared - 'true'/'false' string placed in the <IsShared> element
            parent_network_uuid - is parent provider vdc network that will be used for mapping.
            It optional attribute. by default if no parent network indicate the first available will be used.

            Returns:
                The raw xml content of the create response, or None on failure
        """
        client_as_admin = self.connect_as_admin()
        if not client_as_admin:
            raise vimconn.vimconnConnectionException("Failed to connect vCD.")
        if network_name is None:
            return None

        url_list = [self.url, '/api/admin/vdc/', self.tenant_id]
        vm_list_rest_call = ''.join(url_list)

        if client_as_admin._session:
            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
                     'x-vcloud-authorization': client_as_admin._session.headers['x-vcloud-authorization']}

            # GET the admin vdc view to discover the provider vdc reference and
            # the 'add orgVdcNetwork' link we will POST to later
            response = self.perform_request(req_type='GET',
                                            url=vm_list_rest_call,
                                            headers=headers)

            provider_network = None
            available_networks = None
            add_vdc_rest_url = None

            if response.status_code != requests.codes.ok:
                self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
                                                                                          response.status_code))
                return None
            else:
                try:
                    vm_list_xmlroot = XmlElementTree.fromstring(response.content)
                    for child in vm_list_xmlroot:
                        if child.tag.split("}")[1] == 'ProviderVdcReference':
                            provider_network = child.attrib.get('href')
                            # application/vnd.vmware.admin.providervdc+xml
                        if child.tag.split("}")[1] == 'Link':
                            if child.attrib.get('type') == 'application/vnd.vmware.vcloud.orgVdcNetwork+xml' \
                                    and child.attrib.get('rel') == 'add':
                                add_vdc_rest_url = child.attrib.get('href')
                except:
                    self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
                    self.logger.debug("Respond body {}".format(response.content))
                    return None

            # find  pvdc provided available network
            response = self.perform_request(req_type='GET',
                                            url=provider_network,
                                            headers=headers)
            if response.status_code != requests.codes.ok:
                self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
                                                                                          response.status_code))
                return None

            # when no parent was given, take the first href found under
            # <AvailableNetworks> in the provider vdc view
            if parent_network_uuid is None:
                try:
                    vm_list_xmlroot = XmlElementTree.fromstring(response.content)
                    for child in vm_list_xmlroot.iter():
                        if child.tag.split("}")[1] == 'AvailableNetworks':
                            for networks in child.iter():
                                # application/vnd.vmware.admin.network+xml
                                if networks.attrib.get('href') is not None:
                                    available_networks = networks.attrib.get('href')
                                    break
                except:
                    return None

            try:
                #Configure IP profile of the network
                # NOTE: the caller's ip_profile dict is mutated in place below
                # (defaults are written back into it) — presumably intentional,
                # but confirm callers do not reuse the same dict.
                ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE

                if 'subnet_address' not in ip_profile or ip_profile['subnet_address'] is None:
                    # no subnet given: pick a random /24 under 192.168.x.0
                    subnet_rand = random.randint(0, 255)
                    ip_base = "192.168.{}.".format(subnet_rand)
                    ip_profile['subnet_address'] = ip_base + "0/24"
                else:
                    ip_base = ip_profile['subnet_address'].rsplit('.',1)[0] + '.'

                if 'gateway_address' not in ip_profile or ip_profile['gateway_address'] is None:
                    ip_profile['gateway_address']=ip_base + "1"
                if 'dhcp_count' not in ip_profile or ip_profile['dhcp_count'] is None:
                    ip_profile['dhcp_count']=DEFAULT_IP_PROFILE['dhcp_count']
                if 'dhcp_enabled' not in ip_profile or ip_profile['dhcp_enabled'] is None:
                    ip_profile['dhcp_enabled']=DEFAULT_IP_PROFILE['dhcp_enabled']
                if 'dhcp_start_address' not in ip_profile or ip_profile['dhcp_start_address'] is None:
                    ip_profile['dhcp_start_address']=ip_base + "3"
                if 'ip_version' not in ip_profile or ip_profile['ip_version'] is None:
                    ip_profile['ip_version']=DEFAULT_IP_PROFILE['ip_version']
                if 'dns_address' not in ip_profile or ip_profile['dns_address'] is None:
                    ip_profile['dns_address']=ip_base + "2"

                gateway_address=ip_profile['gateway_address']
                dhcp_count=int(ip_profile['dhcp_count'])
                subnet_address=self.convert_cidr_to_netmask(ip_profile['subnet_address'])

                if ip_profile['dhcp_enabled']==True:
                    dhcp_enabled='true'
                else:
                    dhcp_enabled='false'
                dhcp_start_address=ip_profile['dhcp_start_address']

                #derive dhcp_end_address from dhcp_start_address & dhcp_count
                end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
                end_ip_int += dhcp_count - 1
                dhcp_end_address = str(netaddr.IPAddress(end_ip_int))

                ip_version=ip_profile['ip_version']
                dns_address=ip_profile['dns_address']
            except KeyError as exp:
                self.logger.debug("Create Network REST: Key error {}".format(exp))
                raise vimconn.vimconnException("Create Network REST: Key error{}".format(exp))

            # either use client provided UUID or search for a first available
            #  if both are not defined we return none
            if parent_network_uuid is not None:
                provider_network = None
                available_networks = None
                add_vdc_rest_url = None

                url_list = [self.url, '/api/admin/vdc/', self.tenant_id, '/networks']
                add_vdc_rest_url = ''.join(url_list)

                url_list = [self.url, '/api/admin/network/', parent_network_uuid]
                available_networks = ''.join(url_list)

            #Creating all networks as Direct Org VDC type networks.
            #Unused in case of Underlay (data/ptp) network interface.
            fence_mode="isolated"
            is_inherited='false'
            dns_list = dns_address.split(";")
            dns1 = dns_list[0]
            dns2_text = ""
            if len(dns_list) >= 2:
                dns2_text = "\n                                                <Dns2>{}</Dns2>\n".format(dns_list[1])
            # XML payload sent to the 'add orgVdcNetwork' endpoint; the
            # embedded whitespace is part of the wire format, do not reflow
            data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
                            <Description>Openmano created</Description>
                                    <Configuration>
                                        <IpScopes>
                                            <IpScope>
                                                <IsInherited>{1:s}</IsInherited>
                                                <Gateway>{2:s}</Gateway>
                                                <Netmask>{3:s}</Netmask>
                                                <Dns1>{4:s}</Dns1>{5:s}
                                                <IsEnabled>{6:s}</IsEnabled>
                                                <IpRanges>
                                                    <IpRange>
                                                        <StartAddress>{7:s}</StartAddress>
                                                        <EndAddress>{8:s}</EndAddress>
                                                    </IpRange>
                                                </IpRanges>
                                            </IpScope>
                                        </IpScopes>
                                        <FenceMode>{9:s}</FenceMode>
                                    </Configuration>
                                    <IsShared>{10:s}</IsShared>
                        </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
                                                    subnet_address, dns1, dns2_text, dhcp_enabled,
                                                    dhcp_start_address, dhcp_end_address,
                                                    fence_mode, isshared)

            headers['Content-Type'] = 'application/vnd.vmware.vcloud.orgVdcNetwork+xml'
            try:
                response = self.perform_request(req_type='POST',
                                           url=add_vdc_rest_url,
                                           headers=headers,
                                           data=data)

                if response.status_code != 201:
                    self.logger.debug("Create Network POST REST API call failed. Return status code {}, Response content: {}"
                                      .format(response.status_code,response.content))
                else:
                    # 201 only means the task was created; wait for it to finish
                    network_task = self.get_task_from_response(response.content)
                    self.logger.debug("Create Network REST : Waiting for Network creation complete")
                    time.sleep(5)
                    result = self.client.get_task_monitor().wait_for_success(task=network_task)
                    if result.get('status') == 'success':
                        return response.content
                    else:
                        self.logger.debug("create_network_rest task failed. Network Create response : {}"
                                          .format(response.content))
            except Exception as exp:
                self.logger.debug("create_network_rest : Exception : {} ".format(exp))

        return None
-
-    def convert_cidr_to_netmask(self, cidr_ip=None):
-        """
-        Method sets convert CIDR netmask address to normal IP format
-        Args:
-            cidr_ip : CIDR IP address
-            Returns:
-                netmask : Converted netmask
-        """
-        if cidr_ip is not None:
-            if '/' in cidr_ip:
-                network, net_bits = cidr_ip.split('/')
-                netmask = socket.inet_ntoa(struct.pack(">I", (0xffffffff << (32 - int(net_bits))) & 0xffffffff))
-            else:
-                netmask = cidr_ip
-            return netmask
-        return None
-
-    def get_provider_rest(self, vca=None):
-        """
-        Method gets provider vdc view from vcloud director
-
-        Args:
-            network_name - is network name to be created.
-            parent_network_uuid - is parent provider vdc network that will be used for mapping.
-            It optional attribute. by default if no parent network indicate the first available will be used.
-
-            Returns:
-                The return xml content of respond or None
-        """
-
-        url_list = [self.url, '/api/admin']
-        if vca:
-            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
-                       'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
-            response = self.perform_request(req_type='GET',
-                                            url=''.join(url_list),
-                                            headers=headers)
-
-        if response.status_code == requests.codes.ok:
-            return response.content
-        return None
-
-    def create_vdc(self, vdc_name=None):
-
-        vdc_dict = {}
-
-        xml_content = self.create_vdc_from_tmpl_rest(vdc_name=vdc_name)
-        if xml_content is not None:
-            try:
-                task_resp_xmlroot = XmlElementTree.fromstring(xml_content)
-                for child in task_resp_xmlroot:
-                    if child.tag.split("}")[1] == 'Owner':
-                        vdc_id = child.attrib.get('href').split("/")[-1]
-                        vdc_dict[vdc_id] = task_resp_xmlroot.get('href')
-                        return vdc_dict
-            except:
-                self.logger.debug("Respond body {}".format(xml_content))
-
-        return None
-
    def create_vdc_from_tmpl_rest(self, vdc_name=None):
        """
        Method create vdc in vCloud director based on VDC template.
        it uses pre-defined template.

        Args:
            vdc_name -  name of a new vdc.

            Returns:
                The return xml content of respond or None
        """
        # pre-requesite atleast one vdc template should be available in vCD
        self.logger.info("Creating new vdc {}".format(vdc_name))
        vca = self.connect_as_admin()
        if not vca:
            raise vimconn.vimconnConnectionException("Failed to connect vCD")
        if vdc_name is None:
            return None

        url_list = [self.url, '/api/vdcTemplates']
        vm_list_rest_call = ''.join(url_list)

        headers = {'Accept':'application/*+xml;version=' + API_VERSION,
                    'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
        # list the vdc templates known to vCD
        response = self.perform_request(req_type='GET',
                                        url=vm_list_rest_call,
                                        headers=headers)

        # container url to a template
        vdc_template_ref = None
        try:
            vm_list_xmlroot = XmlElementTree.fromstring(response.content)
            for child in vm_list_xmlroot:
                # application/vnd.vmware.admin.providervdc+xml
                # we need find a template from witch we instantiate VDC
                if child.tag.split("}")[1] == 'VdcTemplate':
                    if child.attrib.get('type') == 'application/vnd.vmware.admin.vdcTemplate+xml':
                        # NOTE: keeps iterating, so the LAST matching template wins
                        vdc_template_ref = child.attrib.get('href')
        except:
            self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
            self.logger.debug("Respond body {}".format(response.content))
            return None

        # if we didn't found required pre defined template we return None
        if vdc_template_ref is None:
            return None

        try:
            # instantiate vdc
            url_list = [self.url, '/api/org/', self.org_uuid, '/action/instantiate']
            vm_list_rest_call = ''.join(url_list)
            data = """<InstantiateVdcTemplateParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
                                        <Source href="{1:s}"></Source>
                                        <Description>opnemano</Description>
                                        </InstantiateVdcTemplateParams>""".format(vdc_name, vdc_template_ref)

            headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml'

            response = self.perform_request(req_type='POST',
                                            url=vm_list_rest_call,
                                            headers=headers,
                                            data=data)

            # block until the asynchronous instantiation task completes
            vdc_task = self.get_task_from_response(response.content)
            self.client.get_task_monitor().wait_for_success(task=vdc_task)

            # if we all ok we respond with content otherwise by default None
            if response.status_code >= 200 and response.status_code < 300:
                return response.content
            return None
        except:
            self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
            self.logger.debug("Respond body {}".format(response.content))

        return None
-
    def create_vdc_rest(self, vdc_name=None):
        """
        Method create network in vCloud director

        Args:
            vdc_name - vdc name to be created
            Returns:
                The raw xml content of the create response, or None on failure
        """

        self.logger.info("Creating new vdc {}".format(vdc_name))

        vca = self.connect_as_admin()
        if not vca:
            raise vimconn.vimconnConnectionException("Failed to connect vCD")
        if vdc_name is None:
            return None

        url_list = [self.url, '/api/admin/org/', self.org_uuid]
        vm_list_rest_call = ''.join(url_list)

        if vca._session:
            # NOTE(review): the token is taken from self.client rather than
            # the freshly created admin session 'vca' — confirm intended
            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
                      'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
            response = self.perform_request(req_type='GET',
                                            url=vm_list_rest_call,
                                            headers=headers)

            provider_vdc_ref = None
            add_vdc_rest_url = None
            available_networks = None

            if response.status_code != requests.codes.ok:
                self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
                                                                                          response.status_code))
                return None
            else:
                # locate the 'add createVdcParams' link in the admin org view
                try:
                    vm_list_xmlroot = XmlElementTree.fromstring(response.content)
                    for child in vm_list_xmlroot:
                        # application/vnd.vmware.admin.providervdc+xml
                        if child.tag.split("}")[1] == 'Link':
                            if child.attrib.get('type') == 'application/vnd.vmware.admin.createVdcParams+xml' \
                                    and child.attrib.get('rel') == 'add':
                                add_vdc_rest_url = child.attrib.get('href')
                except:
                    self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
                    self.logger.debug("Respond body {}".format(response.content))
                    return None

                # pick a provider vdc reference from the provider view
                response = self.get_provider_rest(vca=vca)
                try:
                    vm_list_xmlroot = XmlElementTree.fromstring(response)
                    for child in vm_list_xmlroot:
                        if child.tag.split("}")[1] == 'ProviderVdcReferences':
                            for sub_child in child:
                                # last reference found wins
                                provider_vdc_ref = sub_child.attrib.get('href')
                except:
                    self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
                    self.logger.debug("Respond body {}".format(response))
                    return None

                if add_vdc_rest_url is not None and provider_vdc_ref is not None:
                    # XML payload with hard-coded allocation model and quotas
                    data = """ <CreateVdcParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5"><Description>{1:s}</Description>
                            <AllocationModel>ReservationPool</AllocationModel>
                            <ComputeCapacity><Cpu><Units>MHz</Units><Allocated>2048</Allocated><Limit>2048</Limit></Cpu>
                            <Memory><Units>MB</Units><Allocated>2048</Allocated><Limit>2048</Limit></Memory>
                            </ComputeCapacity><NicQuota>0</NicQuota><NetworkQuota>100</NetworkQuota>
                            <VdcStorageProfile><Enabled>true</Enabled><Units>MB</Units><Limit>20480</Limit><Default>true</Default></VdcStorageProfile>
                            <ProviderVdcReference
                            name="Main Provider"
                            href="{2:s}" />
                    <UsesFastProvisioning>true</UsesFastProvisioning></CreateVdcParams>""".format(escape(vdc_name),
                                                                                                  escape(vdc_name),
                                                                                                  provider_vdc_ref)

                    headers['Content-Type'] = 'application/vnd.vmware.admin.createVdcParams+xml'

                    response = self.perform_request(req_type='POST',
                                                    url=add_vdc_rest_url,
                                                    headers=headers,
                                                    data=data)

                    # if we all ok we respond with content otherwise by default None
                    if response.status_code == 201:
                        return response.content
        return None
-
-    def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
-        """
-        Method retrieve vapp detail from vCloud director
-
-        Args:
-            vapp_uuid - is vapp identifier.
-
-            Returns:
-                The return network uuid or return None
-        """
-
-        parsed_respond = {}
-        vca = None
-
-        if need_admin_access:
-            vca = self.connect_as_admin()
-        else:
-            vca = self.client
-
-        if not vca:
-            raise vimconn.vimconnConnectionException("Failed to connect vCD")
-        if vapp_uuid is None:
-            return None
-
-        url_list = [self.url, '/api/vApp/vapp-', vapp_uuid]
-        get_vapp_restcall = ''.join(url_list)
-
-        if vca._session:
-            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
-                       'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
-            response = self.perform_request(req_type='GET',
-                                            url=get_vapp_restcall,
-                                            headers=headers)
-
-            if response.status_code == 403:
-                if need_admin_access == False:
-                    response = self.retry_rest('GET', get_vapp_restcall)
-
-            if response.status_code != requests.codes.ok:
-                self.logger.debug("REST API call {} failed. Return status code {}".format(get_vapp_restcall,
-                                                                                          response.status_code))
-                return parsed_respond
-
-            try:
-                xmlroot_respond = XmlElementTree.fromstring(response.content)
-                parsed_respond['ovfDescriptorUploaded'] = xmlroot_respond.attrib['ovfDescriptorUploaded']
-
-                namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
-                              'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
-                              'vmw': 'http://www.vmware.com/schema/ovf',
-                              'vm': 'http://www.vmware.com/vcloud/v1.5',
-                              'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
-                              "vmext":"http://www.vmware.com/vcloud/extension/v1.5",
-                              "xmlns":"http://www.vmware.com/vcloud/v1.5"
-                             }
-
-                created_section = xmlroot_respond.find('vm:DateCreated', namespaces)
-                if created_section is not None:
-                    parsed_respond['created'] = created_section.text
-
-                network_section = xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig', namespaces)
-                if network_section is not None and 'networkName' in network_section.attrib:
-                    parsed_respond['networkname'] = network_section.attrib['networkName']
-
-                ipscopes_section = \
-                    xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig/vm:Configuration/vm:IpScopes',
-                                         namespaces)
-                if ipscopes_section is not None:
-                    for ipscope in ipscopes_section:
-                        for scope in ipscope:
-                            tag_key = scope.tag.split("}")[1]
-                            if tag_key == 'IpRanges':
-                                ip_ranges = scope.getchildren()
-                                for ipblock in ip_ranges:
-                                    for block in ipblock:
-                                        parsed_respond[block.tag.split("}")[1]] = block.text
-                            else:
-                                parsed_respond[tag_key] = scope.text
-
-                # parse children section for other attrib
-                children_section = xmlroot_respond.find('vm:Children/', namespaces)
-                if children_section is not None:
-                    parsed_respond['name'] = children_section.attrib['name']
-                    parsed_respond['nestedHypervisorEnabled'] = children_section.attrib['nestedHypervisorEnabled'] \
-                     if  "nestedHypervisorEnabled" in children_section.attrib else None
-                    parsed_respond['deployed'] = children_section.attrib['deployed']
-                    parsed_respond['status'] = children_section.attrib['status']
-                    parsed_respond['vmuuid'] = children_section.attrib['id'].split(":")[-1]
-                    network_adapter = children_section.find('vm:NetworkConnectionSection', namespaces)
-                    nic_list = []
-                    for adapters in network_adapter:
-                        adapter_key = adapters.tag.split("}")[1]
-                        if adapter_key == 'PrimaryNetworkConnectionIndex':
-                            parsed_respond['primarynetwork'] = adapters.text
-                        if adapter_key == 'NetworkConnection':
-                            vnic = {}
-                            if 'network' in adapters.attrib:
-                                vnic['network'] = adapters.attrib['network']
-                            for adapter in adapters:
-                                setting_key = adapter.tag.split("}")[1]
-                                vnic[setting_key] = adapter.text
-                            nic_list.append(vnic)
-
-                    for link in children_section:
-                        if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
-                            if link.attrib['rel'] == 'screen:acquireTicket':
-                                parsed_respond['acquireTicket'] = link.attrib
-                            if link.attrib['rel'] == 'screen:acquireMksTicket':
-                                parsed_respond['acquireMksTicket'] = link.attrib
-
-                    parsed_respond['interfaces'] = nic_list
-                    vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
-                    if vCloud_extension_section is not None:
-                        vm_vcenter_info = {}
-                        vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
-                        vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
-                        if vmext is not None:
-                            vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
-                        parsed_respond["vm_vcenter_info"]= vm_vcenter_info
-
-                    virtual_hardware_section = children_section.find('ovf:VirtualHardwareSection', namespaces)
-                    vm_virtual_hardware_info = {}
-                    if virtual_hardware_section is not None:
-                        for item in virtual_hardware_section.iterfind('ovf:Item',namespaces):
-                            if item.find("rasd:Description",namespaces).text == "Hard disk":
-                                disk_size = item.find("rasd:HostResource" ,namespaces
-                                                ).attrib["{"+namespaces['vm']+"}capacity"]
-
-                                vm_virtual_hardware_info["disk_size"]= disk_size
-                                break
-
-                        for link in virtual_hardware_section:
-                            if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
-                                if link.attrib['rel'] == 'edit' and link.attrib['href'].endswith("/disks"):
-                                    vm_virtual_hardware_info["disk_edit_href"] = link.attrib['href']
-                                    break
-
-                    parsed_respond["vm_virtual_hardware"]= vm_virtual_hardware_info
-            except Exception as exp :
-                self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
-        return parsed_respond
-
-    def acquire_console(self, vm_uuid=None):
-
-        if vm_uuid is None:
-            return None
-        if self.client._session:
-            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
-                       'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
-            vm_dict = self.get_vapp_details_rest(vapp_uuid=vm_uuid)
-            console_dict = vm_dict['acquireTicket']
-            console_rest_call = console_dict['href']
-
-            response = self.perform_request(req_type='POST',
-                                            url=console_rest_call,
-                                            headers=headers)
-
-            if response.status_code == 403:
-                response = self.retry_rest('POST', console_rest_call)
-
-            if response.status_code == requests.codes.ok:
-                return response.content
-
-        return None
-
-    def modify_vm_disk(self, vapp_uuid, flavor_disk):
-        """
-        Method retrieve vm disk details
-
-        Args:
-            vapp_uuid - is vapp identifier.
-            flavor_disk - disk size as specified in VNFD (flavor)
-
-            Returns:
-                The return network uuid or return None
-        """
-        status = None
-        try:
-            #Flavor disk is in GB convert it into MB
-            flavor_disk = int(flavor_disk) * 1024
-            vm_details = self.get_vapp_details_rest(vapp_uuid)
-            if vm_details:
-                vm_name = vm_details["name"]
-                self.logger.info("VM: {} flavor_disk :{}".format(vm_name , flavor_disk))
-
-            if vm_details and "vm_virtual_hardware" in vm_details:
-                vm_disk = int(vm_details["vm_virtual_hardware"]["disk_size"])
-                disk_edit_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
-
-                self.logger.info("VM: {} VM_disk :{}".format(vm_name , vm_disk))
-
-                if flavor_disk > vm_disk:
-                    status = self.modify_vm_disk_rest(disk_edit_href ,flavor_disk)
-                    self.logger.info("Modify disk of VM {} from {} to {} MB".format(vm_name,
-                                                         vm_disk,  flavor_disk ))
-                else:
-                    status = True
-                    self.logger.info("No need to modify disk of VM {}".format(vm_name))
-
-            return status
-        except Exception as exp:
-            self.logger.info("Error occurred while modifing disk size {}".format(exp))
-
-
-    def modify_vm_disk_rest(self, disk_href , disk_size):
-        """
-        Method retrieve modify vm disk size
-
-        Args:
-            disk_href - vCD API URL to GET and PUT disk data
-            disk_size - disk size as specified in VNFD (flavor)
-
-            Returns:
-                The return network uuid or return None
-        """
-        if disk_href is None or disk_size is None:
-            return None
-
-        if self.client._session:
-                headers = {'Accept':'application/*+xml;version=' + API_VERSION,
-                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
-                response = self.perform_request(req_type='GET',
-                                                url=disk_href,
-                                                headers=headers)
-
-        if response.status_code == 403:
-            response = self.retry_rest('GET', disk_href)
-
-        if response.status_code != requests.codes.ok:
-            self.logger.debug("GET REST API call {} failed. Return status code {}".format(disk_href,
-                                                                            response.status_code))
-            return None
-        try:
-            lxmlroot_respond = lxmlElementTree.fromstring(response.content)
-            namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
-            #For python3
-            #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
-            namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
-
-            for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
-                if item.find("rasd:Description",namespaces).text == "Hard disk":
-                    disk_item = item.find("rasd:HostResource" ,namespaces )
-                    if disk_item is not None:
-                        disk_item.attrib["{"+namespaces['xmlns']+"}capacity"] = str(disk_size)
-                        break
-
-            data = lxmlElementTree.tostring(lxmlroot_respond, encoding='utf8', method='xml',
-                                             xml_declaration=True)
-
-            #Send PUT request to modify disk size
-            headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
-
-            response = self.perform_request(req_type='PUT',
-                                                url=disk_href,
-                                                headers=headers,
-                                                data=data)
-            if response.status_code == 403:
-                add_headers = {'Content-Type': headers['Content-Type']}
-                response = self.retry_rest('PUT', disk_href, add_headers, data)
-
-            if response.status_code != 202:
-                self.logger.debug("PUT REST API call {} failed. Return status code {}".format(disk_href,
-                                                                            response.status_code))
-            else:
-                modify_disk_task = self.get_task_from_response(response.content)
-                result = self.client.get_task_monitor().wait_for_success(task=modify_disk_task)
-                if result.get('status') == 'success':
-                    return True
-                else:
-                    return False
-            return None
-
-        except Exception as exp :
-                self.logger.info("Error occurred calling rest api for modifing disk size {}".format(exp))
-                return None
-
-    def add_pci_devices(self, vapp_uuid , pci_devices , vmname_andid):
-        """
-            Method to attach pci devices to VM
-
-             Args:
-                vapp_uuid - uuid of vApp/VM
-                pci_devices - pci devices infromation as specified in VNFD (flavor)
-
-            Returns:
-                The status of add pci device task , vm object and
-                vcenter_conect object
-        """
-        vm_obj = None
-        self.logger.info("Add pci devices {} into vApp {}".format(pci_devices , vapp_uuid))
-        vcenter_conect, content = self.get_vcenter_content()
-        vm_moref_id = self.get_vm_moref_id(vapp_uuid)
-
-        if vm_moref_id:
-            try:
-                no_of_pci_devices = len(pci_devices)
-                if no_of_pci_devices > 0:
-                    #Get VM and its host
-                    host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
-                    self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
-                    if host_obj and vm_obj:
-                        #get PCI devies from host on which vapp is currently installed
-                        avilable_pci_devices = self.get_pci_devices(host_obj, no_of_pci_devices)
-
-                        if avilable_pci_devices is None:
-                            #find other hosts with active pci devices
-                            new_host_obj , avilable_pci_devices = self.get_host_and_PCIdevices(
-                                                                content,
-                                                                no_of_pci_devices
-                                                                )
-
-                            if new_host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
-                                #Migrate vm to the host where PCI devices are availble
-                                self.logger.info("Relocate VM {} on new host {}".format(vm_obj, new_host_obj))
-                                task = self.relocate_vm(new_host_obj, vm_obj)
-                                if task is not None:
-                                    result = self.wait_for_vcenter_task(task, vcenter_conect)
-                                    self.logger.info("Migrate VM status: {}".format(result))
-                                    host_obj = new_host_obj
-                                else:
-                                    self.logger.info("Fail to migrate VM : {}".format(result))
-                                    raise vimconn.vimconnNotFoundException(
-                                    "Fail to migrate VM : {} to host {}".format(
-                                                    vmname_andid,
-                                                    new_host_obj)
-                                        )
-
-                        if host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
-                            #Add PCI devices one by one
-                            for pci_device in avilable_pci_devices:
-                                task = self.add_pci_to_vm(host_obj, vm_obj, pci_device)
-                                if task:
-                                    status= self.wait_for_vcenter_task(task, vcenter_conect)
-                                    if status:
-                                        self.logger.info("Added PCI device {} to VM {}".format(pci_device,str(vm_obj)))
-                                else:
-                                    self.logger.error("Fail to add PCI device {} to VM {}".format(pci_device,str(vm_obj)))
-                            return True, vm_obj, vcenter_conect
-                        else:
-                            self.logger.error("Currently there is no host with"\
-                                              " {} number of avaialble PCI devices required for VM {}".format(
-                                                                            no_of_pci_devices,
-                                                                            vmname_andid)
-                                              )
-                            raise vimconn.vimconnNotFoundException(
-                                    "Currently there is no host with {} "\
-                                    "number of avaialble PCI devices required for VM {}".format(
-                                                                            no_of_pci_devices,
-                                                                            vmname_andid))
-                else:
-                    self.logger.debug("No infromation about PCI devices {} ",pci_devices)
-
-            except vmodl.MethodFault as error:
-                self.logger.error("Error occurred while adding PCI devices {} ",error)
-        return None, vm_obj, vcenter_conect
-
-    def get_vm_obj(self, content, mob_id):
-        """
-            Method to get the vsphere VM object associated with a given morf ID
-             Args:
-                vapp_uuid - uuid of vApp/VM
-                content - vCenter content object
-                mob_id - mob_id of VM
-
-            Returns:
-                    VM and host object
-        """
-        vm_obj = None
-        host_obj = None
-        try :
-            container = content.viewManager.CreateContainerView(content.rootFolder,
-                                                        [vim.VirtualMachine], True
-                                                        )
-            for vm in container.view:
-                mobID = vm._GetMoId()
-                if mobID == mob_id:
-                    vm_obj = vm
-                    host_obj = vm_obj.runtime.host
-                    break
-        except Exception as exp:
-            self.logger.error("Error occurred while finding VM object : {}".format(exp))
-        return host_obj, vm_obj
-
-    def get_pci_devices(self, host, need_devices):
-        """
-            Method to get the details of pci devices on given host
-             Args:
-                host - vSphere host object
-                need_devices - number of pci devices needed on host
-
-             Returns:
-                array of pci devices
-        """
-        all_devices = []
-        all_device_ids = []
-        used_devices_ids = []
-
-        try:
-            if host:
-                pciPassthruInfo = host.config.pciPassthruInfo
-                pciDevies = host.hardware.pciDevice
-
-            for pci_status in pciPassthruInfo:
-                if pci_status.passthruActive:
-                    for device in pciDevies:
-                        if device.id == pci_status.id:
-                            all_device_ids.append(device.id)
-                            all_devices.append(device)
-
-            #check if devices are in use
-            avalible_devices = all_devices
-            for vm in host.vm:
-                if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
-                    vm_devices = vm.config.hardware.device
-                    for device in vm_devices:
-                        if type(device) is vim.vm.device.VirtualPCIPassthrough:
-                            if device.backing.id in all_device_ids:
-                                for use_device in avalible_devices:
-                                    if use_device.id == device.backing.id:
-                                        avalible_devices.remove(use_device)
-                                used_devices_ids.append(device.backing.id)
-                                self.logger.debug("Device {} from devices {}"\
-                                        "is in use".format(device.backing.id,
-                                                           device)
-                                            )
-            if len(avalible_devices) < need_devices:
-                self.logger.debug("Host {} don't have {} number of active devices".format(host,
-                                                                            need_devices))
-                self.logger.debug("found only {} devives {}".format(len(avalible_devices),
-                                                                    avalible_devices))
-                return None
-            else:
-                required_devices = avalible_devices[:need_devices]
-                self.logger.info("Found {} PCI devivces on host {} but required only {}".format(
-                                                            len(avalible_devices),
-                                                            host,
-                                                            need_devices))
-                self.logger.info("Retruning {} devices as {}".format(need_devices,
-                                                                required_devices ))
-                return required_devices
-
-        except Exception as exp:
-            self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host))
-
-        return None
-
-    def get_host_and_PCIdevices(self, content, need_devices):
-        """
-         Method to get the details of pci devices infromation on all hosts
-
-            Args:
-                content - vSphere host object
-                need_devices - number of pci devices needed on host
-
-            Returns:
-                 array of pci devices and host object
-        """
-        host_obj = None
-        pci_device_objs = None
-        try:
-            if content:
-                container = content.viewManager.CreateContainerView(content.rootFolder,
-                                                            [vim.HostSystem], True)
-                for host in container.view:
-                    devices = self.get_pci_devices(host, need_devices)
-                    if devices:
-                        host_obj = host
-                        pci_device_objs = devices
-                        break
-        except Exception as exp:
-            self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host_obj))
-
-        return host_obj,pci_device_objs
-
-    def relocate_vm(self, dest_host, vm) :
-        """
-         Method to get the relocate VM to new host
-
-            Args:
-                dest_host - vSphere host object
-                vm - vSphere VM object
-
-            Returns:
-                task object
-        """
-        task = None
-        try:
-            relocate_spec = vim.vm.RelocateSpec(host=dest_host)
-            task = vm.Relocate(relocate_spec)
-            self.logger.info("Migrating {} to destination host {}".format(vm, dest_host))
-        except Exception as exp:
-            self.logger.error("Error occurred while relocate VM {} to new host {}: {}".format(
-                                                                            dest_host, vm, exp))
-        return task
-
-    def wait_for_vcenter_task(self, task, actionName='job', hideResult=False):
-        """
-        Waits and provides updates on a vSphere task
-        """
-        while task.info.state == vim.TaskInfo.State.running:
-            time.sleep(2)
-
-        if task.info.state == vim.TaskInfo.State.success:
-            if task.info.result is not None and not hideResult:
-                self.logger.info('{} completed successfully, result: {}'.format(
-                                                            actionName,
-                                                            task.info.result))
-            else:
-                self.logger.info('Task {} completed successfully.'.format(actionName))
-        else:
-            self.logger.error('{} did not complete successfully: {} '.format(
-                                                            actionName,
-                                                            task.info.error)
-                              )
-
-        return task.info.result
-
-    def add_pci_to_vm(self,host_object, vm_object, host_pci_dev):
-        """
-         Method to add pci device in given VM
-
-            Args:
-                host_object - vSphere host object
-                vm_object - vSphere VM object
-                host_pci_dev -  host_pci_dev must be one of the devices from the
-                                host_object.hardware.pciDevice list
-                                which is configured as a PCI passthrough device
-
-            Returns:
-                task object
-        """
-        task = None
-        if vm_object and host_object and host_pci_dev:
-            try :
-                #Add PCI device to VM
-                pci_passthroughs = vm_object.environmentBrowser.QueryConfigTarget(host=None).pciPassthrough
-                systemid_by_pciid = {item.pciDevice.id: item.systemId for item in pci_passthroughs}
-
-                if host_pci_dev.id not in systemid_by_pciid:
-                    self.logger.error("Device {} is not a passthrough device ".format(host_pci_dev))
-                    return None
-
-                deviceId = hex(host_pci_dev.deviceId % 2**16).lstrip('0x')
-                backing = vim.VirtualPCIPassthroughDeviceBackingInfo(deviceId=deviceId,
-                                            id=host_pci_dev.id,
-                                            systemId=systemid_by_pciid[host_pci_dev.id],
-                                            vendorId=host_pci_dev.vendorId,
-                                            deviceName=host_pci_dev.deviceName)
-
-                hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)
-
-                new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
-                new_device_config.operation = "add"
-                vmConfigSpec = vim.vm.ConfigSpec()
-                vmConfigSpec.deviceChange = [new_device_config]
-
-                task = vm_object.ReconfigVM_Task(spec=vmConfigSpec)
-                self.logger.info("Adding PCI device {} into VM {} from host {} ".format(
-                                                            host_pci_dev, vm_object, host_object)
-                                )
-            except Exception as exp:
-                self.logger.error("Error occurred while adding pci devive {} to VM {}: {}".format(
-                                                                            host_pci_dev,
-                                                                            vm_object,
-                                                                             exp))
-        return task
-
-    def get_vm_vcenter_info(self):
-        """
-        Method to get details of vCenter and vm
-
-            Args:
-                vapp_uuid - uuid of vApp or VM
-
-            Returns:
-                Moref Id of VM and deails of vCenter
-        """
-        vm_vcenter_info = {}
-
-        if self.vcenter_ip is not None:
-            vm_vcenter_info["vm_vcenter_ip"] = self.vcenter_ip
-        else:
-            raise vimconn.vimconnException(message="vCenter IP is not provided."\
-                                           " Please provide vCenter IP while attaching datacenter to tenant in --config")
-        if self.vcenter_port is not None:
-            vm_vcenter_info["vm_vcenter_port"] = self.vcenter_port
-        else:
-            raise vimconn.vimconnException(message="vCenter port is not provided."\
-                                           " Please provide vCenter port while attaching datacenter to tenant in --config")
-        if self.vcenter_user is not None:
-            vm_vcenter_info["vm_vcenter_user"] = self.vcenter_user
-        else:
-            raise vimconn.vimconnException(message="vCenter user is not provided."\
-                                           " Please provide vCenter user while attaching datacenter to tenant in --config")
-
-        if self.vcenter_password is not None:
-            vm_vcenter_info["vm_vcenter_password"] = self.vcenter_password
-        else:
-            raise vimconn.vimconnException(message="vCenter user password is not provided."\
-                                           " Please provide vCenter user password while attaching datacenter to tenant in --config")
-
-        return vm_vcenter_info
-
-
-    def get_vm_pci_details(self, vmuuid):
-        """
-            Method to get VM PCI device details from vCenter
-
-            Args:
-                vm_obj - vSphere VM object
-
-            Returns:
-                dict of PCI devives attached to VM
-
-        """
-        vm_pci_devices_info = {}
-        try:
-            vcenter_conect, content = self.get_vcenter_content()
-            vm_moref_id = self.get_vm_moref_id(vmuuid)
-            if vm_moref_id:
-                #Get VM and its host
-                if content:
-                    host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
-                    if host_obj and vm_obj:
-                        vm_pci_devices_info["host_name"]= host_obj.name
-                        vm_pci_devices_info["host_ip"]= host_obj.config.network.vnic[0].spec.ip.ipAddress
-                        for device in vm_obj.config.hardware.device:
-                            if type(device) == vim.vm.device.VirtualPCIPassthrough:
-                                device_details={'devide_id':device.backing.id,
-                                                'pciSlotNumber':device.slotInfo.pciSlotNumber,
-                                            }
-                                vm_pci_devices_info[device.deviceInfo.label] = device_details
-                else:
-                    self.logger.error("Can not connect to vCenter while getting "\
-                                          "PCI devices infromationn")
-                return vm_pci_devices_info
-        except Exception as exp:
-            self.logger.error("Error occurred while getting VM infromationn"\
-                             " for VM : {}".format(exp))
-            raise vimconn.vimconnException(message=exp)
-
-
-    def reserve_memory_for_all_vms(self, vapp, memory_mb):
-        """
-            Method to reserve memory for all VMs
-            Args :
-                vapp - VApp
-                memory_mb - Memory in MB
-            Returns:
-                None
-        """
-
-        self.logger.info("Reserve memory for all VMs")
-        for vms in vapp.get_all_vms():
-            vm_id = vms.get('id').split(':')[-1]
-
-            url_rest_call = "{}/api/vApp/vm-{}/virtualHardwareSection/memory".format(self.url, vm_id)
-
-            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
-                       'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
-            headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItem+xml'
-            response = self.perform_request(req_type='GET',
-                                            url=url_rest_call,
-                                            headers=headers)
-
-            if response.status_code == 403:
-                response = self.retry_rest('GET', url_rest_call)
-
-            if response.status_code != 200:
-                self.logger.error("REST call {} failed reason : {}"\
-                                  "status code : {}".format(url_rest_call,
-                                                            response.content,
-                                                            response.status_code))
-                raise vimconn.vimconnException("reserve_memory_for_all_vms : Failed to get "\
-                                               "memory")
-
-            bytexml = bytes(bytearray(response.content, encoding='utf-8'))
-            contentelem = lxmlElementTree.XML(bytexml)
-            namespaces = {prefix:uri for prefix,uri in contentelem.nsmap.iteritems() if prefix}
-            namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
-
-            # Find the reservation element in the response
-            memelem_list = contentelem.findall(".//rasd:Reservation", namespaces)
-            for memelem in memelem_list:
-                memelem.text = str(memory_mb)
-
-            newdata = lxmlElementTree.tostring(contentelem, pretty_print=True)
-
-            response = self.perform_request(req_type='PUT',
-                                            url=url_rest_call,
-                                            headers=headers,
-                                            data=newdata)
-
-            if response.status_code == 403:
-                add_headers = {'Content-Type': headers['Content-Type']}
-                response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
-
-            if response.status_code != 202:
-                self.logger.error("REST call {} failed reason : {}"\
-                                  "status code : {} ".format(url_rest_call,
-                                  response.content,
-                                  response.status_code))
-                raise vimconn.vimconnException("reserve_memory_for_all_vms : Failed to update "\
-                                               "virtual hardware memory section")
-            else:
-                mem_task = self.get_task_from_response(response.content)
-                result = self.client.get_task_monitor().wait_for_success(task=mem_task)
-                if result.get('status') == 'success':
-                    self.logger.info("reserve_memory_for_all_vms(): VM {} succeeded "\
-                                      .format(vm_id))
-                else:
-                    self.logger.error("reserve_memory_for_all_vms(): VM {} failed "\
-                                      .format(vm_id))
-
-    def connect_vapp_to_org_vdc_network(self, vapp_id, net_name):
-        """
-            Configure VApp network config with org vdc network
-            Args :
-                vapp - VApp
-            Returns:
-                None
-        """
-
-        self.logger.info("Connecting vapp {} to org vdc network {}".
-                         format(vapp_id, net_name))
-
-        url_rest_call = "{}/api/vApp/vapp-{}/networkConfigSection/".format(self.url, vapp_id)
-
-        headers = {'Accept':'application/*+xml;version=' + API_VERSION,
-                   'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
-        response = self.perform_request(req_type='GET',
-                                        url=url_rest_call,
-                                        headers=headers)
-
-        if response.status_code == 403:
-            response = self.retry_rest('GET', url_rest_call)
-
-        if response.status_code != 200:
-            self.logger.error("REST call {} failed reason : {}"\
-                              "status code : {}".format(url_rest_call,
-                                                        response.content,
-                                                        response.status_code))
-            raise vimconn.vimconnException("connect_vapp_to_org_vdc_network : Failed to get "\
-                                           "network config section")
-
-        data = response.content
-        headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConfigSection+xml'
-        net_id = self.get_network_id_by_name(net_name)
-        if not net_id:
-            raise vimconn.vimconnException("connect_vapp_to_org_vdc_network : Failed to find "\
-                                           "existing network")
-
-        bytexml = bytes(bytearray(data, encoding='utf-8'))
-        newelem = lxmlElementTree.XML(bytexml)
-        namespaces = {prefix: uri for prefix, uri in newelem.nsmap.iteritems() if prefix}
-        namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
-        nwcfglist = newelem.findall(".//xmlns:NetworkConfig", namespaces)
-
-        # VCD 9.7 returns an incorrect parentnetwork element. Fix it before PUT operation
-        parentnetworklist = newelem.findall(".//xmlns:ParentNetwork", namespaces)
-        if parentnetworklist:
-            for pn in parentnetworklist:
-                if "href" not in pn.keys():
-                    id_val = pn.get("id")
-                    href_val = "{}/api/network/{}".format(self.url, id_val)
-                    pn.set("href", href_val)
-
-        newstr = """<NetworkConfig networkName="{}">
-                  <Configuration>
-                       <ParentNetwork href="{}/api/network/{}"/>
-                       <FenceMode>bridged</FenceMode>
-                  </Configuration>
-              </NetworkConfig>
-           """.format(net_name, self.url, net_id)
-        newcfgelem = lxmlElementTree.fromstring(newstr)
-        if nwcfglist:
-            nwcfglist[0].addnext(newcfgelem)
-
-        newdata = lxmlElementTree.tostring(newelem, pretty_print=True)
-
-        response = self.perform_request(req_type='PUT',
-                                        url=url_rest_call,
-                                        headers=headers,
-                                        data=newdata)
-
-        if response.status_code == 403:
-            add_headers = {'Content-Type': headers['Content-Type']}
-            response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
-
-        if response.status_code != 202:
-            self.logger.error("REST call {} failed reason : {}"\
-                              "status code : {} ".format(url_rest_call,
-                              response.content,
-                              response.status_code))
-            raise vimconn.vimconnException("connect_vapp_to_org_vdc_network : Failed to update "\
-                                           "network config section")
-        else:
-            vapp_task = self.get_task_from_response(response.content)
-            result = self.client.get_task_monitor().wait_for_success(task=vapp_task)
-            if result.get('status') == 'success':
-                self.logger.info("connect_vapp_to_org_vdc_network(): Vapp {} connected to "\
-                                 "network {}".format(vapp_id, net_name))
-            else:
-                self.logger.error("connect_vapp_to_org_vdc_network(): Vapp {} failed to "\
-                                  "connect to network {}".format(vapp_id, net_name))
-
-    def remove_primary_network_adapter_from_all_vms(self, vapp):
-        """
-            Method to remove network adapter type to vm
-            Args :
-                vapp - VApp
-            Returns:
-                None
-        """
-
-        self.logger.info("Removing network adapter from all VMs")
-        for vms in vapp.get_all_vms():
-            vm_id = vms.get('id').split(':')[-1]
-
-            url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
-
-            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
-                       'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
-            response = self.perform_request(req_type='GET',
-                                            url=url_rest_call,
-                                            headers=headers)
-
-            if response.status_code == 403:
-                response = self.retry_rest('GET', url_rest_call)
-
-            if response.status_code != 200:
-                self.logger.error("REST call {} failed reason : {}"\
-                                  "status code : {}".format(url_rest_call,
-                                                            response.content,
-                                                            response.status_code))
-                raise vimconn.vimconnException("remove_primary_network_adapter : Failed to get "\
-                                               "network connection section")
-
-            data = response.content
-            data = data.split('<Link rel="edit"')[0]
-
-            headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
-
-            newdata = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-                      <NetworkConnectionSection xmlns="http://www.vmware.com/vcloud/v1.5"
-                              xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
-                              xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"
-                              xmlns:common="http://schemas.dmtf.org/wbem/wscim/1/common"
-                              xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
-                              xmlns:vmw="http://www.vmware.com/schema/ovf"
-                              xmlns:ovfenv="http://schemas.dmtf.org/ovf/environment/1"
-                              xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
-                              xmlns:ns9="http://www.vmware.com/vcloud/versions"
-                              href="{url}" type="application/vnd.vmware.vcloud.networkConnectionSection+xml" ovf:required="false">
-                              <ovf:Info>Specifies the available VM network connections</ovf:Info>
-                             <PrimaryNetworkConnectionIndex>0</PrimaryNetworkConnectionIndex>
-                             <Link rel="edit" href="{url}" type="application/vnd.vmware.vcloud.networkConnectionSection+xml"/>
-                      </NetworkConnectionSection>""".format(url=url_rest_call)
-            response = self.perform_request(req_type='PUT',
-                                            url=url_rest_call,
-                                            headers=headers,
-                                            data=newdata)
-
-            if response.status_code == 403:
-                add_headers = {'Content-Type': headers['Content-Type']}
-                response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
-
-            if response.status_code != 202:
-                self.logger.error("REST call {} failed reason : {}"\
-                                  "status code : {} ".format(url_rest_call,
-                                  response.content,
-                                  response.status_code))
-                raise vimconn.vimconnException("remove_primary_network_adapter : Failed to update "\
-                                               "network connection section")
-            else:
-                nic_task = self.get_task_from_response(response.content)
-                result = self.client.get_task_monitor().wait_for_success(task=nic_task)
-                if result.get('status') == 'success':
-                    self.logger.info("remove_primary_network_adapter(): VM {} conneced to "\
-                                      "default NIC type".format(vm_id))
-                else:
-                    self.logger.error("remove_primary_network_adapter(): VM {} failed to "\
-                                      "connect NIC type".format(vm_id))
-
-    def add_network_adapter_to_vms(self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None):
-        """
-            Method to add network adapter type to vm
-            Args :
-                network_name - name of network
-                primary_nic_index - int value for primary nic index
-                nicIndex - int value for nic index
-                nic_type - specify model name to which add to vm
-            Returns:
-                None
-        """
-
-        self.logger.info("Add network adapter to VM: network_name {} nicIndex {} nic_type {}".\
-                         format(network_name, nicIndex, nic_type))
-        try:
-            ip_address = None
-            floating_ip = False
-            mac_address = None
-            if 'floating_ip' in net: floating_ip = net['floating_ip']
-
-            # Stub for ip_address feature
-            if 'ip_address' in net: ip_address = net['ip_address']
-
-            if 'mac_address' in net: mac_address = net['mac_address']
-
-            if floating_ip:
-                allocation_mode = "POOL"
-            elif ip_address:
-                allocation_mode = "MANUAL"
-            else:
-                allocation_mode = "DHCP"
-
-            if not nic_type:
-                for vms in vapp.get_all_vms():
-                    vm_id = vms.get('id').split(':')[-1]
-
-                    url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
-
-                    headers = {'Accept':'application/*+xml;version=' + API_VERSION,
-                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
-                    response = self.perform_request(req_type='GET',
-                                                    url=url_rest_call,
-                                                    headers=headers)
-
-                    if response.status_code == 403:
-                        response = self.retry_rest('GET', url_rest_call)
-
-                    if response.status_code != 200:
-                        self.logger.error("REST call {} failed reason : {}"\
-                                             "status code : {}".format(url_rest_call,
-                                                                    response.content,
-                                                               response.status_code))
-                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
-                                                                         "network connection section")
-
-                    data = response.content
-                    data = data.split('<Link rel="edit"')[0]
-                    if '<PrimaryNetworkConnectionIndex>' not in data:
-                        self.logger.debug("add_network_adapter PrimaryNIC not in data")
-                        item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
-                                <NetworkConnection network="{}">
-                                <NetworkConnectionIndex>{}</NetworkConnectionIndex>
-                                <IsConnected>true</IsConnected>
-                                <IpAddressAllocationMode>{}</IpAddressAllocationMode>
-                                </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
-                                                                                         allocation_mode)
-                        # Stub for ip_address feature
-                        if ip_address:
-                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
-                            item =  item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
-
-                        if mac_address:
-                            mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
-                            item =  item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
-
-                        data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n</NetworkConnectionSection>'.format(item))
-                    else:
-                        self.logger.debug("add_network_adapter PrimaryNIC in data")
-                        new_item = """<NetworkConnection network="{}">
-                                    <NetworkConnectionIndex>{}</NetworkConnectionIndex>
-                                    <IsConnected>true</IsConnected>
-                                    <IpAddressAllocationMode>{}</IpAddressAllocationMode>
-                                    </NetworkConnection>""".format(network_name, nicIndex,
-                                                                          allocation_mode)
-                        # Stub for ip_address feature
-                        if ip_address:
-                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
-                            new_item =  new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
-
-                        if mac_address:
-                            mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
-                            new_item =  new_item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
-
-                        data = data + new_item + '</NetworkConnectionSection>'
-
-                    headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
-
-                    response = self.perform_request(req_type='PUT',
-                                                    url=url_rest_call,
-                                                    headers=headers,
-                                                    data=data)
-
-                    if response.status_code == 403:
-                        add_headers = {'Content-Type': headers['Content-Type']}
-                        response = self.retry_rest('PUT', url_rest_call, add_headers, data)
-
-                    if response.status_code != 202:
-                        self.logger.error("REST call {} failed reason : {}"\
-                                            "status code : {} ".format(url_rest_call,
-                                                                    response.content,
-                                                               response.status_code))
-                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
-                                                                            "network connection section")
-                    else:
-                        nic_task = self.get_task_from_response(response.content)
-                        result = self.client.get_task_monitor().wait_for_success(task=nic_task)
-                        if result.get('status') == 'success':
-                            self.logger.info("add_network_adapter_to_vms(): VM {} conneced to "\
-                                                               "default NIC type".format(vm_id))
-                        else:
-                            self.logger.error("add_network_adapter_to_vms(): VM {} failed to "\
-                                                              "connect NIC type".format(vm_id))
-            else:
-                for vms in vapp.get_all_vms():
-                    vm_id = vms.get('id').split(':')[-1]
-
-                    url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
-
-                    headers = {'Accept':'application/*+xml;version=' + API_VERSION,
-                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
-                    response = self.perform_request(req_type='GET',
-                                                    url=url_rest_call,
-                                                    headers=headers)
-
-                    if response.status_code == 403:
-                        response = self.retry_rest('GET', url_rest_call)
-
-                    if response.status_code != 200:
-                        self.logger.error("REST call {} failed reason : {}"\
-                                            "status code : {}".format(url_rest_call,
-                                                                   response.content,
-                                                              response.status_code))
-                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
-                                                                        "network connection section")
-                    data = response.content
-                    data = data.split('<Link rel="edit"')[0]
-                    vcd_netadapter_type = nic_type
-                    if nic_type in ['SR-IOV', 'VF']:
-                        vcd_netadapter_type = "SRIOVETHERNETCARD"
-
-                    if '<PrimaryNetworkConnectionIndex>' not in data:
-                        self.logger.debug("add_network_adapter PrimaryNIC not in data nic_type {}".format(nic_type))
-                        item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
-                                <NetworkConnection network="{}">
-                                <NetworkConnectionIndex>{}</NetworkConnectionIndex>
-                                <IsConnected>true</IsConnected>
-                                <IpAddressAllocationMode>{}</IpAddressAllocationMode>
-                                <NetworkAdapterType>{}</NetworkAdapterType>
-                                </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
-                                                                               allocation_mode, vcd_netadapter_type)
-                        # Stub for ip_address feature
-                        if ip_address:
-                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
-                            item =  item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
-
-                        if mac_address:
-                            mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
-                            item = item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
-
-                        data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n</NetworkConnectionSection>'.format(item))
-                    else:
-                        self.logger.debug("add_network_adapter PrimaryNIC in data nic_type {}".format(nic_type))
-                        new_item = """<NetworkConnection network="{}">
-                                    <NetworkConnectionIndex>{}</NetworkConnectionIndex>
-                                    <IsConnected>true</IsConnected>
-                                    <IpAddressAllocationMode>{}</IpAddressAllocationMode>
-                                    <NetworkAdapterType>{}</NetworkAdapterType>
-                                    </NetworkConnection>""".format(network_name, nicIndex,
-                                                                allocation_mode, vcd_netadapter_type)
-                        # Stub for ip_address feature
-                        if ip_address:
-                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
-                            new_item =  new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
-
-                        if mac_address:
-                            mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
-                            new_item =  new_item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
-
-                        data = data + new_item + '</NetworkConnectionSection>'
-
-                    headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
-
-                    response = self.perform_request(req_type='PUT',
-                                                    url=url_rest_call,
-                                                    headers=headers,
-                                                    data=data)
-
-                    if response.status_code == 403:
-                        add_headers = {'Content-Type': headers['Content-Type']}
-                        response = self.retry_rest('PUT', url_rest_call, add_headers, data)
-
-                    if response.status_code != 202:
-                        self.logger.error("REST call {} failed reason : {}"\
-                                            "status code : {}".format(url_rest_call,
-                                                                   response.content,
-                                                              response.status_code))
-                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
-                                                                           "network connection section")
-                    else:
-                        nic_task = self.get_task_from_response(response.content)
-                        result = self.client.get_task_monitor().wait_for_success(task=nic_task)
-                        if result.get('status') == 'success':
-                            self.logger.info("add_network_adapter_to_vms(): VM {} "\
-                                               "conneced to NIC type {}".format(vm_id, nic_type))
-                        else:
-                            self.logger.error("add_network_adapter_to_vms(): VM {} "\
-                                               "failed to connect NIC type {}".format(vm_id, nic_type))
-        except Exception as exp:
-            self.logger.error("add_network_adapter_to_vms() : exception occurred "\
-                                               "while adding Network adapter")
-            raise vimconn.vimconnException(message=exp)
-
-
-    def set_numa_affinity(self, vmuuid, paired_threads_id):
-        """
-            Method to assign numa affinity in vm configuration parammeters
-            Args :
-                vmuuid - vm uuid
-                paired_threads_id - one or more virtual processor
-                                    numbers
-            Returns:
-                return if True
-        """
-        try:
-            vcenter_conect, content = self.get_vcenter_content()
-            vm_moref_id = self.get_vm_moref_id(vmuuid)
-
-            host_obj, vm_obj = self.get_vm_obj(content ,vm_moref_id)
-            if vm_obj:
-                config_spec = vim.vm.ConfigSpec()
-                config_spec.extraConfig = []
-                opt = vim.option.OptionValue()
-                opt.key = 'numa.nodeAffinity'
-                opt.value = str(paired_threads_id)
-                config_spec.extraConfig.append(opt)
-                task = vm_obj.ReconfigVM_Task(config_spec)
-                if task:
-                    result = self.wait_for_vcenter_task(task, vcenter_conect)
-                    extra_config = vm_obj.config.extraConfig
-                    flag = False
-                    for opts in extra_config:
-                        if 'numa.nodeAffinity' in opts.key:
-                            flag = True
-                            self.logger.info("set_numa_affinity: Sucessfully assign numa affinity "\
-                                                     "value {} for vm {}".format(opt.value, vm_obj))
-                        if flag:
-                            return
-            else:
-                self.logger.error("set_numa_affinity: Failed to assign numa affinity")
-        except Exception as exp:
-            self.logger.error("set_numa_affinity : exception occurred while setting numa affinity "\
-                                                       "for VM {} : {}".format(vm_obj, vm_moref_id))
-            raise vimconn.vimconnException("set_numa_affinity : Error {} failed to assign numa "\
-                                                                           "affinity".format(exp))
-
-
-    def cloud_init(self, vapp, cloud_config):
-        """
-        Method to inject ssh-key
-        vapp - vapp object
-        cloud_config a dictionary with:
-                'key-pairs': (optional) list of strings with the public key to be inserted to the default user
-                'users': (optional) list of users to be inserted, each item is a dict with:
-                    'name': (mandatory) user name,
-                    'key-pairs': (optional) list of strings with the public key to be inserted to the user
-                'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
-                    or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
-                'config-files': (optional). List of files to be transferred. Each item is a dict with:
-                    'dest': (mandatory) string with the destination absolute path
-                    'encoding': (optional, by default text). Can be one of:
-                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
-                    'content' (mandatory): string with the content of the file
-                    'permissions': (optional) string with file permissions, typically octal notation '0644'
-                    'owner': (optional) file owner, string with the format 'owner:group'
-                'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk
-        """
-        try:
-            if not isinstance(cloud_config, dict):
-                raise Exception("cloud_init : parameter cloud_config is not a dictionary")
-            else:
-                key_pairs = []
-                userdata = []
-                if "key-pairs" in cloud_config:
-                    key_pairs = cloud_config["key-pairs"]
-
-                if "users" in cloud_config:
-                    userdata = cloud_config["users"]
-
-                self.logger.debug("cloud_init : Guest os customization started..")
-                customize_script = self.format_script(key_pairs=key_pairs, users_list=userdata)
-                customize_script = customize_script.replace("&","&amp;")
-                self.guest_customization(vapp, customize_script)
-
-        except Exception as exp:
-            self.logger.error("cloud_init : exception occurred while injecting "\
-                                                                       "ssh-key")
-            raise vimconn.vimconnException("cloud_init : Error {} failed to inject "\
-                                                               "ssh-key".format(exp))
-
-    def format_script(self, key_pairs=[], users_list=[]):
-        bash_script = """#!/bin/sh
-        echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
-        if [ "$1" = "precustomization" ];then
-            echo performing precustomization tasks   on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
-        """
-
-        keys = "\n".join(key_pairs)
-        if keys:
-            keys_data = """
-            if [ ! -d /root/.ssh ];then
-                mkdir /root/.ssh
-                chown root:root /root/.ssh
-                chmod 700 /root/.ssh
-                touch /root/.ssh/authorized_keys
-                chown root:root /root/.ssh/authorized_keys
-                chmod 600 /root/.ssh/authorized_keys
-                # make centos with selinux happy
-                which restorecon && restorecon -Rv /root/.ssh
-            else
-                touch /root/.ssh/authorized_keys
-                chown root:root /root/.ssh/authorized_keys
-                chmod 600 /root/.ssh/authorized_keys
-            fi
-            echo '{key}' >> /root/.ssh/authorized_keys
-            """.format(key=keys)
-
-            bash_script+= keys_data
-
-        for user in users_list:
-            if 'name' in user: user_name = user['name']
-            if 'key-pairs' in user:
-                user_keys = "\n".join(user['key-pairs'])
-            else:
-                user_keys = None
-
-            add_user_name = """
-                useradd -d /home/{user_name} -m -g users -s /bin/bash {user_name}
-                """.format(user_name=user_name)
-
-            bash_script+= add_user_name
-
-            if user_keys:
-                user_keys_data = """
-                mkdir /home/{user_name}/.ssh
-                chown {user_name}:{user_name} /home/{user_name}/.ssh
-                chmod 700 /home/{user_name}/.ssh
-                touch /home/{user_name}/.ssh/authorized_keys
-                chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
-                chmod 600 /home/{user_name}/.ssh/authorized_keys
-                # make centos with selinux happy
-                which restorecon && restorecon -Rv /home/{user_name}/.ssh
-                echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
-                """.format(user_name=user_name,user_key=user_keys)
-
-                bash_script+= user_keys_data
-
-        return bash_script+"\n\tfi"
-
-    def guest_customization(self, vapp, customize_script):
-        """
-        Method to customize guest os
-        vapp - Vapp object
-        customize_script - Customize script to be run at first boot of VM.
-        """
-        for vm in vapp.get_all_vms():
-            vm_id = vm.get('id').split(':')[-1]
-            vm_name = vm.get('name')
-            vm_name = vm_name.replace('_','-')
-
-            vm_customization_url = "{}/api/vApp/vm-{}/guestCustomizationSection/".format(self.url, vm_id)
-            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
-                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
-
-            headers['Content-Type'] = "application/vnd.vmware.vcloud.guestCustomizationSection+xml"
-
-            data = """<GuestCustomizationSection
-                           xmlns="http://www.vmware.com/vcloud/v1.5"
-                           xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
-                           ovf:required="false" href="{}" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml">
-                           <ovf:Info>Specifies Guest OS Customization Settings</ovf:Info>
-                           <Enabled>true</Enabled>
-                           <ChangeSid>false</ChangeSid>
-                           <VirtualMachineId>{}</VirtualMachineId>
-                           <JoinDomainEnabled>false</JoinDomainEnabled>
-                           <UseOrgSettings>false</UseOrgSettings>
-                           <AdminPasswordEnabled>false</AdminPasswordEnabled>
-                           <AdminPasswordAuto>true</AdminPasswordAuto>
-                           <AdminAutoLogonEnabled>false</AdminAutoLogonEnabled>
-                           <AdminAutoLogonCount>0</AdminAutoLogonCount>
-                           <ResetPasswordRequired>false</ResetPasswordRequired>
-                           <CustomizationScript>{}</CustomizationScript>
-                           <ComputerName>{}</ComputerName>
-                           <Link href="{}" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml" rel="edit"/>
-                       </GuestCustomizationSection>
-                   """.format(vm_customization_url,
-                                             vm_id,
-                                  customize_script,
-                                           vm_name,
-                              vm_customization_url)
-
-            response = self.perform_request(req_type='PUT',
-                                             url=vm_customization_url,
-                                             headers=headers,
-                                             data=data)
-            if response.status_code == 202:
-                guest_task = self.get_task_from_response(response.content)
-                self.client.get_task_monitor().wait_for_success(task=guest_task)
-                self.logger.info("guest_customization : customized guest os task "\
-                                             "completed for VM {}".format(vm_name))
-            else:
-                self.logger.error("guest_customization : task for customized guest os"\
-                                                    "failed for VM {}".format(vm_name))
-                raise vimconn.vimconnException("guest_customization : failed to perform"\
-                                       "guest os customization on VM {}".format(vm_name))
-
-    def add_new_disk(self, vapp_uuid, disk_size):
-        """
-            Method to create an empty vm disk
-
-            Args:
-                vapp_uuid - is vapp identifier.
-                disk_size - size of disk to be created in GB
-
-            Returns:
-                None
-        """
-        status = False
-        vm_details = None
-        try:
-            #Disk size in GB, convert it into MB
-            if disk_size is not None:
-                disk_size_mb = int(disk_size) * 1024
-                vm_details = self.get_vapp_details_rest(vapp_uuid)
-
-            if vm_details and "vm_virtual_hardware" in vm_details:
-                self.logger.info("Adding disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
-                disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
-                status = self.add_new_disk_rest(disk_href, disk_size_mb)
-
-        except Exception as exp:
-            msg = "Error occurred while creating new disk {}.".format(exp)
-            self.rollback_newvm(vapp_uuid, msg)
-
-        if status:
-            self.logger.info("Added new disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
-        else:
-            #If failed to add disk, delete VM
-            msg = "add_new_disk: Failed to add new disk to {}".format(vm_details["name"])
-            self.rollback_newvm(vapp_uuid, msg)
-
-
-    def add_new_disk_rest(self, disk_href, disk_size_mb):
-        """
-        Retrives vApp Disks section & add new empty disk
-
-        Args:
-            disk_href: Disk section href to addd disk
-            disk_size_mb: Disk size in MB
-
-            Returns: Status of add new disk task
-        """
-        status = False
-        if self.client._session:
-            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
-                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
-            response = self.perform_request(req_type='GET',
-                                            url=disk_href,
-                                            headers=headers)
-
-        if response.status_code == 403:
-            response = self.retry_rest('GET', disk_href)
-
-        if response.status_code != requests.codes.ok:
-            self.logger.error("add_new_disk_rest: GET REST API call {} failed. Return status code {}"
-                              .format(disk_href, response.status_code))
-            return status
-        try:
-            #Find but type & max of instance IDs assigned to disks
-            lxmlroot_respond = lxmlElementTree.fromstring(response.content)
-            namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
-            #For python3
-            #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
-            namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
-            instance_id = 0
-            for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
-                if item.find("rasd:Description",namespaces).text == "Hard disk":
-                    inst_id = int(item.find("rasd:InstanceID" ,namespaces).text)
-                    if inst_id > instance_id:
-                        instance_id = inst_id
-                        disk_item = item.find("rasd:HostResource" ,namespaces)
-                        bus_subtype = disk_item.attrib["{"+namespaces['xmlns']+"}busSubType"]
-                        bus_type = disk_item.attrib["{"+namespaces['xmlns']+"}busType"]
-
-            instance_id = instance_id + 1
-            new_item =   """<Item>
-                                <rasd:Description>Hard disk</rasd:Description>
-                                <rasd:ElementName>New disk</rasd:ElementName>
-                                <rasd:HostResource
-                                    xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
-                                    vcloud:capacity="{}"
-                                    vcloud:busSubType="{}"
-                                    vcloud:busType="{}"></rasd:HostResource>
-                                <rasd:InstanceID>{}</rasd:InstanceID>
-                                <rasd:ResourceType>17</rasd:ResourceType>
-                            </Item>""".format(disk_size_mb, bus_subtype, bus_type, instance_id)
-
-            new_data = response.content
-            #Add new item at the bottom
-            new_data = new_data.replace('</Item>\n</RasdItemsList>', '</Item>\n{}\n</RasdItemsList>'.format(new_item))
-
-            # Send PUT request to modify virtual hardware section with new disk
-            headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
-
-            response = self.perform_request(req_type='PUT',
-                                            url=disk_href,
-                                            data=new_data,
-                                            headers=headers)
-
-            if response.status_code == 403:
-                add_headers = {'Content-Type': headers['Content-Type']}
-                response = self.retry_rest('PUT', disk_href, add_headers, new_data)
-
-            if response.status_code != 202:
-                self.logger.error("PUT REST API call {} failed. Return status code {}. Response Content:{}"
-                                  .format(disk_href, response.status_code, response.content))
-            else:
-                add_disk_task = self.get_task_from_response(response.content)
-                result = self.client.get_task_monitor().wait_for_success(task=add_disk_task)
-                if result.get('status') == 'success':
-                    status = True
-                else:
-                    self.logger.error("Add new disk REST task failed to add {} MB disk".format(disk_size_mb))
-
-        except Exception as exp:
-            self.logger.error("Error occurred calling rest api for creating new disk {}".format(exp))
-
-        return status
-
-
-    def add_existing_disk(self, catalogs=None, image_id=None, size=None, template_name=None, vapp_uuid=None):
-        """
-            Method to add existing disk to vm
-            Args :
-                catalogs - List of VDC catalogs
-                image_id - Catalog ID
-                template_name - Name of template in catalog
-                vapp_uuid - UUID of vApp
-            Returns:
-                None
-        """
-        disk_info = None
-        vcenter_conect, content = self.get_vcenter_content()
-        #find moref-id of vm in image
-        catalog_vm_info = self.get_vapp_template_details(catalogs=catalogs,
-                                                         image_id=image_id,
-                                                        )
-
-        if catalog_vm_info and "vm_vcenter_info" in catalog_vm_info:
-            if "vm_moref_id" in catalog_vm_info["vm_vcenter_info"]:
-                catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get("vm_moref_id", None)
-                if catalog_vm_moref_id:
-                    self.logger.info("Moref_id of VM in catalog : {}" .format(catalog_vm_moref_id))
-                    host, catalog_vm_obj = self.get_vm_obj(content, catalog_vm_moref_id)
-                    if catalog_vm_obj:
-                        #find existing disk
-                        disk_info = self.find_disk(catalog_vm_obj)
-                    else:
-                        exp_msg = "No VM with image id {} found".format(image_id)
-                        self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
-        else:
-            exp_msg = "No Image found with image ID {} ".format(image_id)
-            self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
-
-        if disk_info:
-            self.logger.info("Existing disk_info : {}".format(disk_info))
-            #get VM
-            vm_moref_id = self.get_vm_moref_id(vapp_uuid)
-            host, vm_obj = self.get_vm_obj(content, vm_moref_id)
-            if vm_obj:
-                status = self.add_disk(vcenter_conect=vcenter_conect,
-                                       vm=vm_obj,
-                                       disk_info=disk_info,
-                                       size=size,
-                                       vapp_uuid=vapp_uuid
-                                       )
-            if status:
-                self.logger.info("Disk from image id {} added to {}".format(image_id,
-                                                                            vm_obj.config.name)
-                                 )
-        else:
-            msg = "No disk found with image id {} to add in VM {}".format(
-                                                            image_id,
-                                                            vm_obj.config.name)
-            self.rollback_newvm(vapp_uuid, msg, exp_type="NotFound")
-
-
-    def find_disk(self, vm_obj):
-        """
-         Method to find details of existing disk in VM
-            Args :
-                vm_obj - vCenter object of VM
-                image_id - Catalog ID
-            Returns:
-                disk_info : dict of disk details
-        """
-        disk_info = {}
-        if vm_obj:
-            try:
-                devices = vm_obj.config.hardware.device
-                for device in devices:
-                    if type(device) is vim.vm.device.VirtualDisk:
-                        if isinstance(device.backing,vim.vm.device.VirtualDisk.FlatVer2BackingInfo) and hasattr(device.backing, 'fileName'):
-                            disk_info["full_path"] = device.backing.fileName
-                            disk_info["datastore"] = device.backing.datastore
-                            disk_info["capacityKB"] = device.capacityInKB
-                            break
-            except Exception as exp:
-                self.logger.error("find_disk() : exception occurred while "\
-                                  "getting existing disk details :{}".format(exp))
-        return disk_info
-
-
-    def add_disk(self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info={}):
-        """
-         Method to add existing disk in VM
-            Args :
-                vcenter_conect - vCenter content object
-                vm - vCenter vm object
-                disk_info : dict of disk details
-            Returns:
-                status : status of add disk task
-        """
-        datastore = disk_info["datastore"] if "datastore" in disk_info else None
-        fullpath = disk_info["full_path"] if "full_path" in disk_info else None
-        capacityKB = disk_info["capacityKB"] if "capacityKB" in disk_info else None
-        if size is not None:
-            #Convert size from GB to KB
-            sizeKB = int(size) * 1024 * 1024
-            #compare size of existing disk and user given size.Assign whicherver is greater
-            self.logger.info("Add Existing disk : sizeKB {} , capacityKB {}".format(
-                                                                    sizeKB, capacityKB))
-            if sizeKB > capacityKB:
-                capacityKB = sizeKB
-
-        if datastore and fullpath and capacityKB:
-            try:
-                spec = vim.vm.ConfigSpec()
-                # get all disks on a VM, set unit_number to the next available
-                unit_number = 0
-                for dev in vm.config.hardware.device:
-                    if hasattr(dev.backing, 'fileName'):
-                        unit_number = int(dev.unitNumber) + 1
-                        # unit_number 7 reserved for scsi controller
-                        if unit_number == 7:
-                            unit_number += 1
-                    if isinstance(dev, vim.vm.device.VirtualDisk):
-                        #vim.vm.device.VirtualSCSIController
-                        controller_key = dev.controllerKey
-
-                self.logger.info("Add Existing disk : unit number {} , controller key {}".format(
-                                                                    unit_number, controller_key))
-                # add disk here
-                dev_changes = []
-                disk_spec = vim.vm.device.VirtualDeviceSpec()
-                disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
-                disk_spec.device = vim.vm.device.VirtualDisk()
-                disk_spec.device.backing = \
-                    vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
-                disk_spec.device.backing.thinProvisioned = True
-                disk_spec.device.backing.diskMode = 'persistent'
-                disk_spec.device.backing.datastore  = datastore
-                disk_spec.device.backing.fileName  = fullpath
-
-                disk_spec.device.unitNumber = unit_number
-                disk_spec.device.capacityInKB = capacityKB
-                disk_spec.device.controllerKey = controller_key
-                dev_changes.append(disk_spec)
-                spec.deviceChange = dev_changes
-                task = vm.ReconfigVM_Task(spec=spec)
-                status = self.wait_for_vcenter_task(task, vcenter_conect)
-                return status
-            except Exception as exp:
-                exp_msg = "add_disk() : exception {} occurred while adding disk "\
-                          "{} to vm {}".format(exp,
-                                               fullpath,
-                                               vm.config.name)
-                self.rollback_newvm(vapp_uuid, exp_msg)
-        else:
-            msg = "add_disk() : Can not add disk to VM with disk info {} ".format(disk_info)
-            self.rollback_newvm(vapp_uuid, msg)
-
-
-    def get_vcenter_content(self):
-        """
-         Get the vsphere content object
-        """
-        try:
-            vm_vcenter_info = self.get_vm_vcenter_info()
-        except Exception as exp:
-            self.logger.error("Error occurred while getting vCenter infromationn"\
-                             " for VM : {}".format(exp))
-            raise vimconn.vimconnException(message=exp)
-
-        context = None
-        if hasattr(ssl, '_create_unverified_context'):
-            context = ssl._create_unverified_context()
-
-        vcenter_conect = SmartConnect(
-                    host=vm_vcenter_info["vm_vcenter_ip"],
-                    user=vm_vcenter_info["vm_vcenter_user"],
-                    pwd=vm_vcenter_info["vm_vcenter_password"],
-                    port=int(vm_vcenter_info["vm_vcenter_port"]),
-                    sslContext=context
-                )
-        atexit.register(Disconnect, vcenter_conect)
-        content = vcenter_conect.RetrieveContent()
-        return vcenter_conect, content
-
-
-    def get_vm_moref_id(self, vapp_uuid):
-        """
-        Get the moref_id of given VM
-        """
-        try:
-            if vapp_uuid:
-                vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
-                if vm_details and "vm_vcenter_info" in vm_details:
-                    vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
-            return vm_moref_id
-
-        except Exception as exp:
-            self.logger.error("Error occurred while getting VM moref ID "\
-                             " for VM : {}".format(exp))
-            return None
-
-
-    def get_vapp_template_details(self, catalogs=None, image_id=None , template_name=None):
-        """
-            Method to get vApp template details
-                Args :
-                    catalogs - list of VDC catalogs
-                    image_id - Catalog ID to find
-                    template_name : template name in catalog
-                Returns:
-                    parsed_respond : dict of vApp tempalte details
-        """
-        parsed_response = {}
-
-        vca = self.connect_as_admin()
-        if not vca:
-            raise vimconn.vimconnConnectionException("Failed to connect vCD")
-
-        try:
-            org, vdc = self.get_vdc_details()
-            catalog = self.get_catalog_obj(image_id, catalogs)
-            if catalog:
-                items = org.get_catalog_item(catalog.get('name'), catalog.get('name'))
-                catalog_items = [items.attrib]
-
-                if len(catalog_items) == 1:
-                    headers = {'Accept':'application/*+xml;version=' + API_VERSION,
-                           'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
-
-                    response = self.perform_request(req_type='GET',
-                                                    url=catalog_items[0].get('href'),
-                                                    headers=headers)
-                    catalogItem = XmlElementTree.fromstring(response.content)
-                    entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
-                    vapp_tempalte_href = entity.get("href")
-                    #get vapp details and parse moref id
-
-                    namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
-                                  'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
-                                  'vmw': 'http://www.vmware.com/schema/ovf',
-                                  'vm': 'http://www.vmware.com/vcloud/v1.5',
-                                  'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
-                                  'vmext':"http://www.vmware.com/vcloud/extension/v1.5",
-                                  'xmlns':"http://www.vmware.com/vcloud/v1.5"
-                                }
-
-                    if vca._session:
-                        response = self.perform_request(req_type='GET',
-                                                    url=vapp_tempalte_href,
-                                                    headers=headers)
-
-                        if response.status_code != requests.codes.ok:
-                            self.logger.debug("REST API call {} failed. Return status code {}".format(
-                                                vapp_tempalte_href, response.status_code))
-
-                        else:
-                            xmlroot_respond = XmlElementTree.fromstring(response.content)
-                            children_section = xmlroot_respond.find('vm:Children/', namespaces)
-                            if children_section is not None:
-                                vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
-                            if vCloud_extension_section is not None:
-                                vm_vcenter_info = {}
-                                vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
-                                vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
-                                if vmext is not None:
-                                    vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
-                                parsed_response["vm_vcenter_info"]= vm_vcenter_info
-
-        except Exception as exp :
-            self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
-
-        return parsed_response
-
-
-    def rollback_newvm(self, vapp_uuid, msg , exp_type="Genric"):
-        """
-            Method to delete vApp
-                Args :
-                    vapp_uuid - vApp UUID
-                    msg - Error message to be logged
-                    exp_type : Exception type
-                Returns:
-                    None
-        """
-        if vapp_uuid:
-            status = self.delete_vminstance(vapp_uuid)
-        else:
-            msg = "No vApp ID"
-        self.logger.error(msg)
-        if exp_type == "Genric":
-            raise vimconn.vimconnException(msg)
-        elif exp_type == "NotFound":
-            raise vimconn.vimconnNotFoundException(message=msg)
-
-    def add_sriov(self, vapp_uuid, sriov_nets, vmname_andid):
-        """
-            Method to attach SRIOV adapters to VM
-
-             Args:
-                vapp_uuid - uuid of vApp/VM
-                sriov_nets - SRIOV devices infromation as specified in VNFD (flavor)
-                vmname_andid - vmname
-
-            Returns:
-                The status of add SRIOV adapter task , vm object and
-                vcenter_conect object
-        """
-        vm_obj = None
-        vcenter_conect, content = self.get_vcenter_content()
-        vm_moref_id = self.get_vm_moref_id(vapp_uuid)
-
-        if vm_moref_id:
-            try:
-                no_of_sriov_devices = len(sriov_nets)
-                if no_of_sriov_devices > 0:
-                    #Get VM and its host
-                    host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
-                    self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
-                    if host_obj and vm_obj:
-                        #get SRIOV devies from host on which vapp is currently installed
-                        avilable_sriov_devices = self.get_sriov_devices(host_obj,
-                                                                no_of_sriov_devices,
-                                                                )
-
-                        if len(avilable_sriov_devices) == 0:
-                            #find other hosts with active pci devices
-                            new_host_obj , avilable_sriov_devices = self.get_host_and_sriov_devices(
-                                                                content,
-                                                                no_of_sriov_devices,
-                                                                )
-
-                            if new_host_obj is not None and len(avilable_sriov_devices)> 0:
-                                #Migrate vm to the host where SRIOV devices are available
-                                self.logger.info("Relocate VM {} on new host {}".format(vm_obj,
-                                                                                    new_host_obj))
-                                task = self.relocate_vm(new_host_obj, vm_obj)
-                                if task is not None:
-                                    result = self.wait_for_vcenter_task(task, vcenter_conect)
-                                    self.logger.info("Migrate VM status: {}".format(result))
-                                    host_obj = new_host_obj
-                                else:
-                                    self.logger.info("Fail to migrate VM : {}".format(result))
-                                    raise vimconn.vimconnNotFoundException(
-                                    "Fail to migrate VM : {} to host {}".format(
-                                                    vmname_andid,
-                                                    new_host_obj)
-                                        )
-
-                        if host_obj is not None and avilable_sriov_devices is not None and len(avilable_sriov_devices)> 0:
-                            #Add SRIOV devices one by one
-                            for sriov_net in sriov_nets:
-                                network_name = sriov_net.get('net_id')
-                                dvs_portgr_name = self.create_dvPort_group(network_name)
-                                if sriov_net.get('type') == "VF" or sriov_net.get('type') == "SR-IOV":
-                                    #add vlan ID ,Modify portgroup for vlan ID
-                                    self.configure_vlanID(content, vcenter_conect, network_name)
-
-                                task = self.add_sriov_to_vm(content,
-                                                            vm_obj,
-                                                            host_obj,
-                                                            network_name,
-                                                            avilable_sriov_devices[0]
-                                                            )
-                                if task:
-                                    status= self.wait_for_vcenter_task(task, vcenter_conect)
-                                    if status:
-                                        self.logger.info("Added SRIOV {} to VM {}".format(
-                                                                        no_of_sriov_devices,
-                                                                        str(vm_obj)))
-                                else:
-                                    self.logger.error("Fail to add SRIOV {} to VM {}".format(
-                                                                        no_of_sriov_devices,
-                                                                        str(vm_obj)))
-                                    raise vimconn.vimconnUnexpectedResponse(
-                                    "Fail to add SRIOV adapter in VM ".format(str(vm_obj))
-                                        )
-                            return True, vm_obj, vcenter_conect
-                        else:
-                            self.logger.error("Currently there is no host with"\
-                                              " {} number of avaialble SRIOV "\
-                                              "VFs required for VM {}".format(
-                                                                no_of_sriov_devices,
-                                                                vmname_andid)
-                                              )
-                            raise vimconn.vimconnNotFoundException(
-                                    "Currently there is no host with {} "\
-                                    "number of avaialble SRIOV devices required for VM {}".format(
-                                                                            no_of_sriov_devices,
-                                                                            vmname_andid))
-                else:
-                    self.logger.debug("No infromation about SRIOV devices {} ",sriov_nets)
-
-            except vmodl.MethodFault as error:
-                self.logger.error("Error occurred while adding SRIOV {} ",error)
-        return None, vm_obj, vcenter_conect
-
-
-    def get_sriov_devices(self,host, no_of_vfs):
-        """
-            Method to get the details of SRIOV devices on given host
-             Args:
-                host - vSphere host object
-                no_of_vfs - number of VFs needed on host
-
-             Returns:
-                array of SRIOV devices
-        """
-        sriovInfo=[]
-        if host:
-            for device in host.config.pciPassthruInfo:
-                if isinstance(device,vim.host.SriovInfo) and device.sriovActive:
-                    if device.numVirtualFunction >= no_of_vfs:
-                        sriovInfo.append(device)
-                        break
-        return sriovInfo
-
-
-    def get_host_and_sriov_devices(self, content, no_of_vfs):
-        """
-         Method to get the details of SRIOV devices infromation on all hosts
-
-            Args:
-                content - vSphere host object
-                no_of_vfs - number of pci VFs needed on host
-
-            Returns:
-                 array of SRIOV devices and host object
-        """
-        host_obj = None
-        sriov_device_objs = None
-        try:
-            if content:
-                container = content.viewManager.CreateContainerView(content.rootFolder,
-                                                            [vim.HostSystem], True)
-                for host in container.view:
-                    devices = self.get_sriov_devices(host, no_of_vfs)
-                    if devices:
-                        host_obj = host
-                        sriov_device_objs = devices
-                        break
-        except Exception as exp:
-            self.logger.error("Error {} occurred while finding SRIOV devices on host: {}".format(exp, host_obj))
-
-        return host_obj,sriov_device_objs
-
-
-    def add_sriov_to_vm(self,content, vm_obj, host_obj, network_name, sriov_device):
-        """
-         Method to add SRIOV adapter to vm
-
-            Args:
-                host_obj - vSphere host object
-                vm_obj - vSphere vm object
-                content - vCenter content object
-                network_name - name of distributed virtaul portgroup
-                sriov_device - SRIOV device info
-
-            Returns:
-                 task object
-        """
-        devices = []
-        vnic_label = "sriov nic"
-        try:
-            dvs_portgr = self.get_dvport_group(network_name)
-            network_name = dvs_portgr.name
-            nic = vim.vm.device.VirtualDeviceSpec()
-            # VM device
-            nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
-            nic.device = vim.vm.device.VirtualSriovEthernetCard()
-            nic.device.addressType = 'assigned'
-            #nic.device.key = 13016
-            nic.device.deviceInfo = vim.Description()
-            nic.device.deviceInfo.label = vnic_label
-            nic.device.deviceInfo.summary = network_name
-            nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
-
-            nic.device.backing.network = self.get_obj(content, [vim.Network], network_name)
-            nic.device.backing.deviceName = network_name
-            nic.device.backing.useAutoDetect = False
-            nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
-            nic.device.connectable.startConnected = True
-            nic.device.connectable.allowGuestControl = True
-
-            nic.device.sriovBacking = vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
-            nic.device.sriovBacking.physicalFunctionBacking = vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
-            nic.device.sriovBacking.physicalFunctionBacking.id = sriov_device.id
-
-            devices.append(nic)
-            vmconf = vim.vm.ConfigSpec(deviceChange=devices)
-            task = vm_obj.ReconfigVM_Task(vmconf)
-            return task
-        except Exception as exp:
-            self.logger.error("Error {} occurred while adding SRIOV adapter in VM: {}".format(exp, vm_obj))
-            return None
-
-
-    def create_dvPort_group(self, network_name):
-        """
-         Method to create disributed virtual portgroup
-
-            Args:
-                network_name - name of network/portgroup
-
-            Returns:
-                portgroup key
-        """
-        try:
-            new_network_name = [network_name, '-', str(uuid.uuid4())]
-            network_name=''.join(new_network_name)
-            vcenter_conect, content = self.get_vcenter_content()
-
-            dv_switch = self.get_obj(content, [vim.DistributedVirtualSwitch], self.dvs_name)
-            if dv_switch:
-                dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
-                dv_pg_spec.name = network_name
-
-                dv_pg_spec.type = vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
-                dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
-                dv_pg_spec.defaultPortConfig.securityPolicy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
-                dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = vim.BoolPolicy(value=False)
-                dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = vim.BoolPolicy(value=False)
-                dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(value=False)
-
-                task = dv_switch.AddDVPortgroup_Task([dv_pg_spec])
-                self.wait_for_vcenter_task(task, vcenter_conect)
-
-                dvPort_group = self.get_obj(content, [vim.dvs.DistributedVirtualPortgroup], network_name)
-                if dvPort_group:
-                    self.logger.info("Created disributed virtaul port group: {}".format(dvPort_group))
-                    return dvPort_group.key
-            else:
-                self.logger.debug("No disributed virtual switch found with name {}".format(network_name))
-
-        except Exception as exp:
-            self.logger.error("Error occurred while creating disributed virtaul port group {}"\
-                             " : {}".format(network_name, exp))
-        return None
-
-    def reconfig_portgroup(self, content, dvPort_group_name , config_info={}):
-        """
-         Method to reconfigure disributed virtual portgroup
-
-            Args:
-                dvPort_group_name - name of disributed virtual portgroup
-                content - vCenter content object
-                config_info - disributed virtual portgroup configuration
-
-            Returns:
-                task object
-        """
-        try:
-            dvPort_group = self.get_dvport_group(dvPort_group_name)
-            if dvPort_group:
-                dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
-                dv_pg_spec.configVersion = dvPort_group.config.configVersion
-                dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
-                if "vlanID" in config_info:
-                    dv_pg_spec.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
-                    dv_pg_spec.defaultPortConfig.vlan.vlanId = config_info.get('vlanID')
-
-                task = dvPort_group.ReconfigureDVPortgroup_Task(spec=dv_pg_spec)
-                return task
-            else:
-                return None
-        except Exception as exp:
-            self.logger.error("Error occurred while reconfiguraing disributed virtaul port group {}"\
-                             " : {}".format(dvPort_group_name, exp))
-            return None
-
-
-    def destroy_dvport_group(self , dvPort_group_name):
-        """
-         Method to destroy disributed virtual portgroup
-
-            Args:
-                network_name - name of network/portgroup
-
-            Returns:
-                True if portgroup successfully got deleted else false
-        """
-        vcenter_conect, content = self.get_vcenter_content()
-        try:
-            status = None
-            dvPort_group = self.get_dvport_group(dvPort_group_name)
-            if dvPort_group:
-                task = dvPort_group.Destroy_Task()
-                status = self.wait_for_vcenter_task(task, vcenter_conect)
-            return status
-        except vmodl.MethodFault as exp:
-            self.logger.error("Caught vmodl fault {} while deleting disributed virtaul port group {}".format(
-                                                                    exp, dvPort_group_name))
-            return None
-
-
-    def get_dvport_group(self, dvPort_group_name):
-        """
-        Method to get disributed virtual portgroup
-
-            Args:
-                network_name - name of network/portgroup
-
-            Returns:
-                portgroup object
-        """
-        vcenter_conect, content = self.get_vcenter_content()
-        dvPort_group = None
-        try:
-            container = content.viewManager.CreateContainerView(content.rootFolder, [vim.dvs.DistributedVirtualPortgroup], True)
-            for item in container.view:
-                if item.key == dvPort_group_name:
-                    dvPort_group = item
-                    break
-            return dvPort_group
-        except vmodl.MethodFault as exp:
-            self.logger.error("Caught vmodl fault {} for disributed virtaul port group {}".format(
-                                                                            exp, dvPort_group_name))
-            return None
-
-    def get_vlanID_from_dvs_portgr(self, dvPort_group_name):
-        """
-         Method to get disributed virtual portgroup vlanID
-
-            Args:
-                network_name - name of network/portgroup
-
-            Returns:
-                vlan ID
-        """
-        vlanId = None
-        try:
-            dvPort_group = self.get_dvport_group(dvPort_group_name)
-            if dvPort_group:
-                vlanId = dvPort_group.config.defaultPortConfig.vlan.vlanId
-        except vmodl.MethodFault as exp:
-            self.logger.error("Caught vmodl fault {} for disributed virtaul port group {}".format(
-                                                                            exp, dvPort_group_name))
-        return vlanId
-
-
-    def configure_vlanID(self, content, vcenter_conect, dvPort_group_name):
-        """
-         Method to configure vlanID in disributed virtual portgroup vlanID
-
-            Args:
-                network_name - name of network/portgroup
-
-            Returns:
-                None
-        """
-        vlanID = self.get_vlanID_from_dvs_portgr(dvPort_group_name)
-        if vlanID == 0:
-            #configure vlanID
-            vlanID = self.genrate_vlanID(dvPort_group_name)
-            config = {"vlanID":vlanID}
-            task = self.reconfig_portgroup(content, dvPort_group_name,
-                                    config_info=config)
-            if task:
-                status= self.wait_for_vcenter_task(task, vcenter_conect)
-                if status:
-                    self.logger.info("Reconfigured Port group {} for vlan ID {}".format(
-                                                        dvPort_group_name,vlanID))
-            else:
-                self.logger.error("Fail reconfigure portgroup {} for vlanID{}".format(
-                                        dvPort_group_name, vlanID))
-
-
-    def genrate_vlanID(self, network_name):
-        """
-         Method to get unused vlanID
-            Args:
-                network_name - name of network/portgroup
-            Returns:
-                vlanID
-        """
-        vlan_id = None
-        used_ids = []
-        if self.config.get('vlanID_range') == None:
-            raise vimconn.vimconnConflictException("You must provide a 'vlanID_range' "\
-                        "at config value before creating sriov network with vlan tag")
-        if "used_vlanIDs" not in self.persistent_info:
-                self.persistent_info["used_vlanIDs"] = {}
-        else:
-            used_ids = self.persistent_info["used_vlanIDs"].values()
-            #For python3
-            #used_ids = list(self.persistent_info["used_vlanIDs"].values())
-
-        for vlanID_range in self.config.get('vlanID_range'):
-            start_vlanid , end_vlanid = vlanID_range.split("-")
-            if start_vlanid > end_vlanid:
-                raise vimconn.vimconnConflictException("Invalid vlan ID range {}".format(
-                                                                        vlanID_range))
-
-            for id in xrange(int(start_vlanid), int(end_vlanid) + 1):
-            #For python3
-            #for id in range(int(start_vlanid), int(end_vlanid) + 1):
-                if id not in used_ids:
-                    vlan_id = id
-                    self.persistent_info["used_vlanIDs"][network_name] = vlan_id
-                    return vlan_id
-        if vlan_id is None:
-            raise vimconn.vimconnConflictException("All Vlan IDs are in use")
-
-
-    def get_obj(self, content, vimtype, name):
-        """
-         Get the vsphere object associated with a given text name
-        """
-        obj = None
-        container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
-        for item in container.view:
-            if item.name == name:
-                obj = item
-                break
-        return obj
-
-
-    def insert_media_to_vm(self, vapp, image_id):
-        """
-        Method to insert media CD-ROM (ISO image) from catalog to vm.
-        vapp - vapp object to get vm id
-        Image_id - image id for cdrom to be inerted to vm
-        """
-        # create connection object
-        vca = self.connect()
-        try:
-            # fetching catalog details
-            rest_url = "{}/api/catalog/{}".format(self.url, image_id)
-            if vca._session:
-                headers = {'Accept':'application/*+xml;version=' + API_VERSION,
-                           'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
-                response = self.perform_request(req_type='GET',
-                                                url=rest_url,
-                                                headers=headers)
-
-            if response.status_code != 200:
-                self.logger.error("REST call {} failed reason : {}"\
-                             "status code : {}".format(url_rest_call,
-                                                    response.content,
-                                               response.status_code))
-                raise vimconn.vimconnException("insert_media_to_vm(): Failed to get "\
-                                                                    "catalog details")
-            # searching iso name and id
-            iso_name,media_id = self.get_media_details(vca, response.content)
-
-            if iso_name and media_id:
-                data ="""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-                     <ns6:MediaInsertOrEjectParams
-                     xmlns="http://www.vmware.com/vcloud/versions" xmlns:ns2="http://schemas.dmtf.org/ovf/envelope/1" 
-                     xmlns:ns3="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" 
-                     xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/common" 
-                     xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" 
-                     xmlns:ns6="http://www.vmware.com/vcloud/v1.5" 
-                     xmlns:ns7="http://www.vmware.com/schema/ovf" 
-                     xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" 
-                     xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">
-                     <ns6:Media
-                        type="application/vnd.vmware.vcloud.media+xml"
-                        name="{}"
-                        id="urn:vcloud:media:{}"
-                        href="https://{}/api/media/{}"/>
-                     </ns6:MediaInsertOrEjectParams>""".format(iso_name, media_id,
-                                                                self.url,media_id)
-
-                for vms in vapp.get_all_vms():
-                    vm_id = vms.get('id').split(':')[-1]
-
-                    headers['Content-Type'] = 'application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml'
-                    rest_url = "{}/api/vApp/vm-{}/media/action/insertMedia".format(self.url,vm_id)
-
-                    response = self.perform_request(req_type='POST',
-                                                       url=rest_url,
-                                                          data=data,
-                                                    headers=headers)
-
-                    if response.status_code != 202:
-                        error_msg = "insert_media_to_vm() : Failed to insert CD-ROM to vm. Reason {}. " \
-                                    "Status code {}".format(response.text, response.status_code)
-                        self.logger.error(error_msg)
-                        raise vimconn.vimconnException(error_msg)
-                    else:
-                        task = self.get_task_from_response(response.content)
-                        result = self.client.get_task_monitor().wait_for_success(task=task)
-                        if result.get('status') == 'success':
-                            self.logger.info("insert_media_to_vm(): Sucessfully inserted media ISO"\
-                                                                    " image to vm {}".format(vm_id))
-
-        except Exception as exp:
-            self.logger.error("insert_media_to_vm() : exception occurred "\
-                                            "while inserting media CD-ROM")
-            raise vimconn.vimconnException(message=exp)
-
-
-    def get_media_details(self, vca, content):
-        """
-        Method to get catalog item details
-        vca - connection object
-        content - Catalog details
-        Return - Media name, media id
-        """
-        cataloghref_list = []
-        try:
-            if content:
-                vm_list_xmlroot = XmlElementTree.fromstring(content)
-                for child in vm_list_xmlroot.iter():
-                    if 'CatalogItem' in child.tag:
-                        cataloghref_list.append(child.attrib.get('href'))
-                if cataloghref_list is not None:
-                    for href in cataloghref_list:
-                        if href:
-                            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
-                           'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
-                            response = self.perform_request(req_type='GET',
-                                                                  url=href,
-                                                           headers=headers)
-                            if response.status_code != 200:
-                                self.logger.error("REST call {} failed reason : {}"\
-                                             "status code : {}".format(href,
-                                                           response.content,
-                                                      response.status_code))
-                                raise vimconn.vimconnException("get_media_details : Failed to get "\
-                                                                         "catalogitem details")
-                            list_xmlroot = XmlElementTree.fromstring(response.content)
-                            for child in list_xmlroot.iter():
-                                if 'Entity' in child.tag:
-                                    if 'media' in child.attrib.get('href'):
-                                        name = child.attrib.get('name')
-                                        media_id = child.attrib.get('href').split('/').pop()
-                                        return name,media_id
-                            else:
-                                self.logger.debug("Media name and id not found")
-                                return False,False
-        except Exception as exp:
-            self.logger.error("get_media_details : exception occurred "\
-                                               "getting media details")
-            raise vimconn.vimconnException(message=exp)
-
-
-    def retry_rest(self, method, url, add_headers=None, data=None):
-        """ Method to get Token & retry respective REST request
-            Args:
-                api - REST API - Can be one of 'GET' or 'PUT' or 'POST'
-                url - request url to be used
-                add_headers - Additional headers (optional)
-                data - Request payload data to be passed in request
-            Returns:
-                response - Response of request
-        """
-        response = None
-
-        #Get token
-        self.get_token()
-
-        if self.client._session:
-                headers = {'Accept':'application/*+xml;version=' + API_VERSION,
-                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
-
-        if add_headers:
-            headers.update(add_headers)
-
-        if method == 'GET':
-            response = self.perform_request(req_type='GET',
-                                            url=url,
-                                            headers=headers)
-        elif method == 'PUT':
-            response = self.perform_request(req_type='PUT',
-                                            url=url,
-                                            headers=headers,
-                                            data=data)
-        elif method == 'POST':
-            response = self.perform_request(req_type='POST',
-                                            url=url,
-                                            headers=headers,
-                                            data=data)
-        elif method == 'DELETE':
-            response = self.perform_request(req_type='DELETE',
-                                            url=url,
-                                            headers=headers)
-        return response
-
-
-    def get_token(self):
-        """ Generate a new token if expired
-
-            Returns:
-                The return client object that letter can be used to connect to vCloud director as admin for VDC
-        """
-        try:
-            self.logger.debug("Generate token for vca {} as {} to datacenter {}.".format(self.org_name,
-                                                                                      self.user,
-                                                                                      self.org_name))
-            host = self.url
-            client = Client(host, verify_ssl_certs=False)
-            client.set_highest_supported_version()
-            client.set_credentials(BasicLoginCredentials(self.user, self.org_name, self.passwd))
-            # connection object
-            self.client = client
-
-        except:
-            raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
-                                                     "{} as user: {}".format(self.org_name, self.user))
-
-        if not client:
-            raise vimconn.vimconnConnectionException("Failed while reconnecting vCD")
-
-
-    def get_vdc_details(self):
-        """ Get VDC details using pyVcloud Lib
-
-            Returns org and vdc object
-        """
-        vdc = None
-        try:
-            org = Org(self.client, resource=self.client.get_org())
-            vdc = org.get_vdc(self.tenant_name)
-        except Exception as e:
-            # pyvcloud not giving a specific exception, Refresh nevertheless
-            self.logger.debug("Received exception {}, refreshing token ".format(str(e)))
-
-        #Retry once, if failed by refreshing token
-        if vdc is None:
-            self.get_token()
-            org = Org(self.client, resource=self.client.get_org())
-            vdc = org.get_vdc(self.tenant_name)
-
-        return org, vdc
-
-
-    def perform_request(self, req_type, url, headers=None, data=None):
-        """Perform the POST/PUT/GET/DELETE request."""
-
-        #Log REST request details
-        self.log_request(req_type, url=url, headers=headers, data=data)
-        # perform request and return its result
-        if req_type == 'GET':
-            response = requests.get(url=url,
-                                headers=headers,
-                                verify=False)
-        elif req_type == 'PUT':
-            response = requests.put(url=url,
-                                headers=headers,
-                                data=data,
-                                verify=False)
-        elif req_type == 'POST':
-            response = requests.post(url=url,
-                                 headers=headers,
-                                 data=data,
-                                 verify=False)
-        elif req_type == 'DELETE':
-            response = requests.delete(url=url,
-                                 headers=headers,
-                                 verify=False)
-        #Log the REST response
-        self.log_response(response)
-
-        return response
-
-
-    def log_request(self, req_type, url=None, headers=None, data=None):
-        """Logs REST request details"""
-
-        if req_type is not None:
-            self.logger.debug("Request type: {}".format(req_type))
-
-        if url is not None:
-            self.logger.debug("Request url: {}".format(url))
-
-        if headers is not None:
-            for header in headers:
-                self.logger.debug("Request header: {}: {}".format(header, headers[header]))
-
-        if data is not None:
-            self.logger.debug("Request data: {}".format(data))
-
-
-    def log_response(self, response):
-        """Logs REST response details"""
-
-        self.logger.debug("Response status code: {} ".format(response.status_code))
-
-
-    def get_task_from_response(self, content):
-        """
-        content - API response content(response.content)
-        return task object
-        """
-        xmlroot = XmlElementTree.fromstring(content)
-        if xmlroot.tag.split('}')[1] == "Task":
-            return xmlroot
-        else:
-            for ele in xmlroot:
-                if ele.tag.split("}")[1] == "Tasks":
-                    task = ele[0]
-                    break
-            return task
-
-
-    def power_on_vapp(self,vapp_id, vapp_name):
-        """
-        vapp_id - vApp uuid
-        vapp_name - vAapp name
-        return - Task object
-        """
-        headers = {'Accept':'application/*+xml;version=' + API_VERSION,
-                   'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
-
-        poweron_href = "{}/api/vApp/vapp-{}/power/action/powerOn".format(self.url,
-                                                                          vapp_id)
-        response = self.perform_request(req_type='POST',
-                                       url=poweron_href,
-                                        headers=headers)
-
-        if response.status_code != 202:
-            self.logger.error("REST call {} failed reason : {}"\
-                         "status code : {} ".format(poweron_href,
-                                                response.content,
-                                           response.status_code))
-            raise vimconn.vimconnException("power_on_vapp() : Failed to power on "\
-                                                      "vApp {}".format(vapp_name))
-        else:
-            poweron_task = self.get_task_from_response(response.content)
-            return poweron_task
-
-
diff --git a/osm_ro/vmwarecli.py b/osm_ro/vmwarecli.py
deleted file mode 100755 (executable)
index 80fe394..0000000
+++ /dev/null
@@ -1,819 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-##
-# This file is standalone vmware vcloud director util
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: mbayramov@vmware.com
-##
-
-"""
-
-Standalone application that leverage openmano vmware connector work with vCloud director rest api.
-
- - Provides capability to create and delete VDC for specific organization.
- - Create, delete and manage network for specific VDC
- - List deployed VM's , VAPPs, VDSs, Organization
- - View detail information about VM / Vapp , Organization etc
- - Operate with images upload / boot / power on etc
-
- Usage example.
-
- List organization created in vCloud director
-  vmwarecli.py -u admin -p qwerty123 -c 172.16.254.206 -U Administrator -P qwerty123 -o test -v TEF list org
-
- List VDC for particular organization
-  vmwarecli.py -u admin -p qwerty123 -c 172.16.254.206 -U Administrator -P qwerty123 -o test -v TEF list vdc
-
- Upload image
-  python vmwarerecli.py image upload /Users/spyroot/Developer/Openmano/Ro/vnfs/cirros/cirros.ovf
-
- Boot Image
-    python vmwarerecli.py -u admin -p qwerty123 -c 172.16.254.206 -o test -v TEF image boot cirros cirros
-
- View vApp
-    python vmwarerecli.py -u admin -p qwerty123 -c 172.16.254.206 -o test -v TEF view vapp 90bd2b4e-f782-46cf-b5e2-c3817dcf6633 -u
-
- List VMS
-    python vmwarerecli.py -u admin -p qwerty123 -c 172.16.254.206 -o test -v TEF list vms
-
- List VDC in OSM format
-  python vmwarerecli.py -u admin -p qwerty123 -c 172.16.254.206 -o test -v TEF list vdc -o
-
-Mustaafa Bayramov
-mbayramov@vmware.com
-"""
-import os
-import argparse
-import traceback
-import uuid
-
-from xml.etree import ElementTree as ET
-
-import sys
-from pyvcloud import Http
-
-import logging
-import vimconn
-import time
-import uuid
-import urllib3
-import requests
-
-from vimconn_vmware import vimconnector
-from requests.packages.urllib3.exceptions import InsecureRequestWarning
-from prettytable import PrettyTable
-
-requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
-
-__author__ = "Mustafa Bayramov"
-__date__ = "$16-Sep-2016 11:09:29$"
-
-
-# TODO move to main vim
-def delete_network_action(vca=None, network_uuid=None):
-    """
-    Method leverages vCloud director and query network based on network uuid
-
-    Args:
-        vca - is active VCA connection.
-        network_uuid - is a network uuid
-
-        Returns:
-            The return XML respond
-    """
-
-    if vca is None or network_uuid is None:
-        return None
-
-    url_list = [vca.host, '/api/admin/network/', network_uuid]
-    vm_list_rest_call = ''.join(url_list)
-
-    if not (not vca.vcloud_session or not vca.vcloud_session.organization):
-        response = Http.get(url=vm_list_rest_call,
-                            headers=vca.vcloud_session.get_vcloud_headers(),
-                            verify=vca.verify,
-                            logger=vca.logger)
-        if response.status_code == requests.codes.ok:
-            print response.content
-            return response.content
-
-    return None
-
-
-def print_vapp(vapp_dict=None):
-    """ Method takes vapp_dict and print in tabular format
-
-    Args:
-        vapp_dict: container vapp object.
-
-        Returns:
-            The return nothing
-    """
-
-    # following key available to print
-    # {'status': 'POWERED_OFF', 'storageProfileName': '*', 'hardwareVersion': '7', 'vmToolsVersion': '0',
-    #  'memoryMB': '384',
-    #  'href': 'https://172.16.254.206/api/vAppTemplate/vm-129e22e8-08dc-4cb6-8358-25f635e65d3b',
-    #  'isBusy': 'false', 'isDeployed': 'false', 'isInMaintenanceMode': 'false', 'isVAppTemplate': 'true',
-    #  'networkName': 'nat', 'isDeleted': 'false', 'catalogName': 'Cirros',
-    #  'containerName': 'Cirros Template', #  'container':
-    #  'https://172.16.254.206/api/vAppTemplate/vappTemplate-b966453d-c361-4505-9e38-ccef45815e5d',
-    #  'name': 'Cirros', 'pvdcHighestSupportedHardwareVersion': '11', 'isPublished': 'false',
-    #  'numberOfCpus': '1', 'vdc': 'https://172.16.254.206/api/vdc/a5056f85-418c-4bfd-8041-adb0f48be9d9',
-    #  'guestOs': 'Other (32-bit)', 'isVdcEnabled': 'true'}
-
-    if vapp_dict is None:
-        return
-
-    vm_table = PrettyTable(['vm   uuid',
-                            'vapp name',
-                            'vapp uuid',
-                            'network name',
-                            'storage name',
-                            'vcpu', 'memory', 'hw ver','deployed','status'])
-    for k in vapp_dict:
-        entry = []
-        entry.append(k)
-        entry.append(vapp_dict[k]['containerName'])
-        # vm-b1f5cd4c-2239-4c89-8fdc-a41ff18e0d61
-        entry.append(vapp_dict[k]['container'].split('/')[-1:][0][5:])
-        entry.append(vapp_dict[k]['networkName'])
-        entry.append(vapp_dict[k]['storageProfileName'])
-        entry.append(vapp_dict[k]['numberOfCpus'])
-        entry.append(vapp_dict[k]['memoryMB'])
-        entry.append(vapp_dict[k]['pvdcHighestSupportedHardwareVersion'])
-        entry.append(vapp_dict[k]['isDeployed'])
-        entry.append(vapp_dict[k]['status'])
-
-        vm_table.add_row(entry)
-
-    print vm_table
-
-
-def print_org(org_dict=None):
-    """ Method takes vapp_dict and print in tabular format
-
-    Args:
-        org_dict:  dictionary of organization where key is org uuid.
-
-        Returns:
-            The return nothing
-    """
-
-    if org_dict is None:
-        return
-
-    org_table = PrettyTable(['org uuid', 'name'])
-    for k in org_dict:
-        entry = [k, org_dict[k]]
-        org_table.add_row(entry)
-
-    print org_table
-
-
-def print_vm_list(vm_dict=None):
-    """ Method takes vapp_dict and print in tabular format
-
-    Args:
-        vm_dict:  dictionary of organization where key is org uuid.
-
-        Returns:
-            The return nothing
-    """
-    if vm_dict is None:
-        return
-
-    vm_table = PrettyTable(
-        ['vm uuid', 'vm name', 'vapp uuid', 'vdc uuid', 'network name', 'is deployed', 'vcpu', 'memory', 'status'])
-
-    try:
-        for k in vm_dict:
-            entry = []
-            entry.append(k)
-            entry.append(vm_dict[k]['name'])
-            entry.append(vm_dict[k]['container'].split('/')[-1:][0][5:])
-            entry.append(vm_dict[k]['vdc'].split('/')[-1:][0])
-            entry.append(vm_dict[k]['networkName'])
-            entry.append(vm_dict[k]['isDeployed'])
-            entry.append(vm_dict[k]['numberOfCpus'])
-            entry.append(vm_dict[k]['memoryMB'])
-            entry.append(vm_dict[k]['status'])
-            vm_table.add_row(entry)
-        print vm_table
-    except KeyError:
-        logger.error("wrong key {}".format(KeyError.message))
-        pass
-
-
-def print_vdc_list(org_dict=None):
-    """ Method takes vapp_dict and print in tabular format
-
-    Args:
-        org_dict:  dictionary of organization where key is org uuid.
-
-        Returns:
-            The return nothing
-    """
-    if org_dict is None:
-        return
-    try:
-        vdcs_dict = {}
-        if org_dict.has_key('vdcs'):
-            vdcs_dict = org_dict['vdcs']
-        vdc_table = PrettyTable(['vdc uuid', 'vdc name'])
-        for k in vdcs_dict:
-            entry = [k, vdcs_dict[k]]
-            vdc_table.add_row(entry)
-
-        print vdc_table
-    except KeyError:
-        logger.error("wrong key {}".format(KeyError.message))
-        logger.logger.debug(traceback.format_exc())
-
-
-def print_network_list(org_dict=None):
-    """ Method print network list.
-
-    Args:
-        org_dict:   dictionary of organization that contain key networks with a list of all
-                    network for for specific VDC
-
-        Returns:
-            The return nothing
-    """
-    if org_dict is None:
-        return
-    try:
-        network_dict = {}
-        if org_dict.has_key('networks'):
-            network_dict = org_dict['networks']
-        network_table = PrettyTable(['network uuid', 'network name'])
-        for k in network_dict:
-            entry = [k, network_dict[k]]
-            network_table.add_row(entry)
-
-        print network_table
-
-    except KeyError:
-        logger.error("wrong key {}".format(KeyError.message))
-        logger.logger.debug(traceback.format_exc())
-
-
-def print_org_details(org_dict=None):
-    """ Method takes vapp_dict and print in tabular format
-
-    Args:
-        org_dict:  dictionary of organization where key is org uuid.
-
-        Returns:
-            The return nothing
-    """
-    if org_dict is None:
-        return
-    try:
-        catalogs_dict = {}
-
-        print_vdc_list(org_dict=org_dict)
-        print_network_list(org_dict=org_dict)
-
-        if org_dict.has_key('catalogs'):
-            catalogs_dict = org_dict['catalogs']
-
-        catalog_table = PrettyTable(['catalog uuid', 'catalog name'])
-        for k in catalogs_dict:
-            entry = [k, catalogs_dict[k]]
-            catalog_table.add_row(entry)
-
-        print catalog_table
-
-    except KeyError:
-        logger.error("wrong key {}".format(KeyError.message))
-        logger.logger.debug(traceback.format_exc())
-
-
-def delete_actions(vim=None, action=None, namespace=None):
-    if action == 'network' or namespace.action == 'network':
-        logger.debug("Requesting delete for network {}".format(namespace.network_name))
-        network_uuid = namespace.network_name
-        # if request name based we need find UUID
-        # TODO optimize it or move to external function
-        if not namespace.uuid:
-            org_dict = vim.get_org_list()
-            for org in org_dict:
-                org_net = vim.get_org(org)['networks']
-                for network in org_net:
-                    if org_net[network] == namespace.network_name:
-                        network_uuid = network
-
-        vim.delete_network_action(network_uuid=network_uuid)
-
-
-def list_actions(vim=None, action=None, namespace=None):
-    """ Method provide list object from VDC action
-
-       Args:
-           vim - is vcloud director vim connector.
-           action - is action for list ( vdc / org etc)
-           namespace -  must contain VDC / Org information.
-
-           Returns:
-               The return nothing
-       """
-
-    org_id = None
-    myorgs = vim.get_org_list()
-    for org in myorgs:
-        if myorgs[org] == namespace.vcdorg:
-            org_id = org
-        break
-    else:
-        print(" Invalid organization.")
-        return
-
-    if action == 'vms' or namespace.action == 'vms':
-        vm_dict = vim.get_vm_list(vdc_name=namespace.vcdvdc)
-        print_vm_list(vm_dict=vm_dict)
-    elif action == 'vapps' or namespace.action == 'vapps':
-        vapp_dict = vim.get_vapp_list(vdc_name=namespace.vcdvdc)
-        print_vapp(vapp_dict=vapp_dict)
-    elif action == 'networks' or namespace.action == 'networks':
-        if namespace.osm:
-            osm_print(vim.get_network_list(filter_dict={}))
-        else:
-            print_network_list(vim.get_org(org_uuid=org_id))
-    elif action == 'vdc' or namespace.action == 'vdc':
-        if namespace.osm:
-            osm_print(vim.get_tenant_list(filter_dict=None))
-        else:
-            print_vdc_list(vim.get_org(org_uuid=org_id))
-    elif action == 'org' or namespace.action == 'org':
-        print_org(org_dict=vim.get_org_list())
-    else:
-        return None
-
-
-def print_network_details(network_dict=None):
-    try:
-        network_table = PrettyTable(network_dict.keys())
-        entry = [network_dict.values()]
-        network_table.add_row(entry[0])
-        print network_table
-    except KeyError:
-        logger.error("wrong key {}".format(KeyError.message))
-        logger.logger.debug(traceback.format_exc())
-
-
-def osm_print(generic_dict=None):
-
-    try:
-        for element in generic_dict:
-            table = PrettyTable(element.keys())
-            entry = [element.values()]
-            table.add_row(entry[0])
-        print table
-    except KeyError:
-        logger.error("wrong key {}".format(KeyError.message))
-        logger.logger.debug(traceback.format_exc())
-
-
-def view_actions(vim=None, action=None, namespace=None):
-    org_id = None
-    orgs = vim.get_org_list()
-    for org in orgs:
-        if orgs[org] == namespace.vcdorg:
-            org_id = org
-        break
-    else:
-        print(" Invalid organization.")
-        return
-
-    myorg = vim.get_org(org_uuid=org_id)
-
-    # view org
-    if action == 'org' or namespace.action == 'org':
-        org_id = None
-        orgs = vim.get_org_list()
-        if namespace.uuid:
-            if namespace.org_name in orgs:
-                org_id = namespace.org_name
-        else:
-            # we need find UUID based on name provided
-            for org in orgs:
-                if orgs[org] == namespace.org_name:
-                    org_id = org
-                    break
-
-        logger.debug("Requesting view for orgs {}".format(org_id))
-        print_org_details(vim.get_org(org_uuid=org_id))
-
-    # view vapp action
-    if action == 'vapp' or namespace.action == 'vapp':
-        if namespace.vapp_name is not None and namespace.uuid:
-            logger.debug("Requesting vapp {} for vdc {}".format(namespace.vapp_name, namespace.vcdvdc))
-            vapp_dict = {}
-            vapp_uuid = namespace.vapp_name
-            # if request based on just name we need get UUID
-            if not namespace.uuid:
-                vapp_uuid = vim.get_vappid(vdc=namespace.vcdvdc, vapp_name=namespace.vapp_name)
-                if vapp_uuid is None:
-                    print("Can't find vapp by given name {}".format(namespace.vapp_name))
-                    return
-
-            print " namespace {}".format(namespace)
-            if vapp_dict is not None and namespace.osm:
-                vm_info_dict = vim.get_vminstance(vim_vm_uuid=vapp_uuid)
-                print vm_info_dict
-            if vapp_dict is not None and namespace.osm != True:
-                vapp_dict = vim.get_vapp(vdc_name=namespace.vcdvdc, vapp_name=vapp_uuid, isuuid=True)
-                print_vapp(vapp_dict=vapp_dict)
-
-    # view network
-    if action == 'network' or namespace.action == 'network':
-        logger.debug("Requesting view for network {}".format(namespace.network_name))
-        network_uuid = namespace.network_name
-        # if request name based we need find UUID
-        # TODO optimize it or move to external function
-        if not namespace.uuid:
-            if not myorg.has_key('networks'):
-                print("Network {} is undefined in vcloud director for org {} vdc {}".format(namespace.network_name,
-                                                                                            vim.name,
-                                                                                            vim.tenant_name))
-                return
-
-            my_org_net = myorg['networks']
-            for network in my_org_net:
-                if my_org_net[network] == namespace.network_name:
-                    network_uuid = network
-                    break
-
-        print print_network_details(network_dict=vim.get_vcd_network(network_uuid=network_uuid))
-
-
-def create_actions(vim=None, action=None, namespace=None):
-    """Method gets provider vdc view from vcloud director
-
-        Args:
-            vim - is Cloud director vim connector
-            action - action for create ( network / vdc etc)
-
-        Returns:
-            The return xml content of respond or None
-    """
-    if action == 'network' or namespace.action == 'network':
-        logger.debug("Creating a network in vcloud director".format(namespace.network_name))
-        network_uuid = vim.create_network(namespace.network_name)
-        if network_uuid is not None:
-            print ("Crated new network {} and uuid: {}".format(namespace.network_name, network_uuid))
-        else:
-            print ("Failed create a new network {}".format(namespace.network_name))
-    elif action == 'vdc' or namespace.action == 'vdc':
-        logger.debug("Creating a new vdc in vcloud director.".format(namespace.vdc_name))
-        vdc_uuid = vim.create_vdc(namespace.vdc_name)
-        if vdc_uuid is not None:
-            print ("Crated new vdc {} and uuid: {}".format(namespace.vdc_name, vdc_uuid))
-        else:
-            print ("Failed create a new vdc {}".format(namespace.vdc_name))
-    else:
-        return None
-
-
-def validate_uuid4(uuid_string):
-    """Function validate that string contain valid uuid4
-
-        Args:
-            uuid_string - valid UUID string
-
-        Returns:
-            The return true if string contain valid UUID format
-    """
-    try:
-        val = uuid.UUID(uuid_string, version=4)
-    except ValueError:
-        return False
-    return True
-
-
-def upload_image(vim=None, image_file=None):
-    """Function upload image to vcloud director
-
-        Args:
-            image_file - valid UUID string
-
-        Returns:
-            The return true if image uploaded correctly
-    """
-    try:
-        catalog_uuid = vim.get_image_id_from_path(path=image_file, progress=True)
-        if catalog_uuid is not None and validate_uuid4(catalog_uuid):
-            print("Image uploaded and uuid {}".format(catalog_uuid))
-            return True
-    except vimconn.vimconnException as upload_exception:
-        print("Failed uploaded {} image".format(image_file))
-        print("Error Reason: {}".format(upload_exception.message))
-    return False
-
-
-def boot_image(vim=None, image_name=None, vm_name=None):
-    """ Function boot image that resided in vcloud director.
-        The image name can be UUID of name.
-
-        Args:
-            vim - vim connector
-            image_name - image identified by UUID or text string.
-            vm_name - vmname
-
-
-         Returns:
-             The return true if image uploaded correctly
-     """
-
-    vim_catalog = None
-    try:
-        catalogs = vim.vca.get_catalogs()
-        if not validate_uuid4(image_name):
-            vim_catalog = vim.get_catalogid(catalog_name=image_name, catalogs=catalogs)
-            if vim_catalog is None:
-                return None
-        else:
-            vim_catalog = vim.get_catalogid(catalog_name=image_name, catalogs=catalogs)
-            if vim_catalog is None:
-                return None
-
-        print (" Booting {} image id {} ".format(vm_name, vim_catalog))
-        vm_uuid, _ = vim.new_vminstance(name=vm_name, image_id=vim_catalog)
-        if vm_uuid is not None and validate_uuid4(vm_uuid):
-            print("Image booted and vm uuid {}".format(vm_uuid))
-            vapp_dict = vim.get_vapp(vdc_name=namespace.vcdvdc, vapp_name=vm_uuid, isuuid=True)
-            if vapp_dict is not None:
-                print_vapp(vapp_dict=vapp_dict)
-        return True
-    except vimconn.vimconnNotFoundException as notFound:
-        print("Failed boot {} image".format(image_name))
-        print(notFound.message)
-    except vimconn.vimconnException as vimconError:
-        print("Failed boot {} image".format(image_name))
-        print(vimconError.message)
-    except:
-        print("Failed boot {} image".format(image_name))
-
-
-        return False
-
-
-def image_action(vim=None, action=None, namespace=None):
-    """ Function present set of action to manipulate with image.
-          - upload image
-          - boot image.
-          - delete image ( not yet done )
-
-        Args:
-             vim - vcloud director connector
-             action - string (upload/boot etc)
-             namespace - contain other attributes image name etc
-
-         Returns:
-             The return nothing
-     """
-
-    if action == 'upload' or namespace.action == 'upload':
-        upload_image(vim=vim, image_file=namespace.image)
-    elif action == 'boot' or namespace.action == 'boot':
-        boot_image(vim=vim, image_name=namespace.image, vm_name=namespace.vmname)
-    else:
-        return None
-
-
-def vmwarecli(command=None, action=None, namespace=None):
-    logger.debug("Namespace {}".format(namespace))
-    urllib3.disable_warnings()
-
-    vcduser = None
-    vcdpasword = None
-    vcdhost = None
-    vcdorg = None
-
-    if hasattr(__builtins__, 'raw_input'):
-        input = raw_input
-
-    if namespace.vcdvdc is None:
-        while True:
-            vcduser = input("Enter vcd username: ")
-            if vcduser is not None and len(vcduser) > 0:
-                break
-    else:
-        vcduser = namespace.vcduser
-
-    if namespace.vcdpassword is None:
-        while True:
-            vcdpasword = input("Please enter vcd password: ")
-            if vcdpasword is not None and len(vcdpasword) > 0:
-                break
-    else:
-        vcdpasword = namespace.vcdpassword
-
-    if namespace.vcdhost is None:
-        while True:
-            vcdhost = input("Please enter vcd host name or ip: ")
-            if vcdhost is not None and len(vcdhost) > 0:
-                break
-    else:
-        vcdhost = namespace.vcdhost
-
-    if namespace.vcdorg is None:
-        while True:
-            vcdorg = input("Please enter vcd organization name: ")
-            if vcdorg is not None and len(vcdorg) > 0:
-                break
-    else:
-        vcdorg = namespace.vcdorg
-
-    try:
-        vim = vimconnector(uuid=None,
-                           name=vcdorg,
-                           tenant_id=None,
-                           tenant_name=namespace.vcdvdc,
-                           url=vcdhost,
-                           url_admin=vcdhost,
-                           user=vcduser,
-                           passwd=vcdpasword,
-                           log_level="DEBUG",
-                           config={'admin_username': namespace.vcdamdin, 'admin_password': namespace.vcdadminpassword})
-        vim.vca = vim.connect()
-
-    except vimconn.vimconnConnectionException:
-        print("Failed connect to vcloud director. Please check credential and hostname.")
-        return
-
-    # list
-    if command == 'list' or namespace.command == 'list':
-        logger.debug("Client requested list action")
-        # route request to list actions
-        list_actions(vim=vim, action=action, namespace=namespace)
-
-    # view action
-    if command == 'view' or namespace.command == 'view':
-        logger.debug("Client requested view action")
-        view_actions(vim=vim, action=action, namespace=namespace)
-
-    # delete action
-    if command == 'delete' or namespace.command == 'delete':
-        logger.debug("Client requested delete action")
-        delete_actions(vim=vim, action=action, namespace=namespace)
-
-    # create action
-    if command == 'create' or namespace.command == 'create':
-        logger.debug("Client requested create action")
-        create_actions(vim=vim, action=action, namespace=namespace)
-
-    # image action
-    if command == 'image' or namespace.command == 'image':
-        logger.debug("Client requested create action")
-        image_action(vim=vim, action=action, namespace=namespace)
-
-
-if __name__ == '__main__':
-    defaults = {'vcdvdc': 'default',
-                'vcduser': 'admin',
-                'vcdpassword': 'admin',
-                'vcdhost': 'https://localhost',
-                'vcdorg': 'default',
-                'debug': 'INFO'}
-
-    parser = argparse.ArgumentParser()
-    parser.add_argument('-u', '--vcduser', help='vcloud director username', type=str)
-    parser.add_argument('-p', '--vcdpassword', help='vcloud director password', type=str)
-    parser.add_argument('-U', '--vcdamdin', help='vcloud director password', type=str)
-    parser.add_argument('-P', '--vcdadminpassword', help='vcloud director password', type=str)
-    parser.add_argument('-c', '--vcdhost', help='vcloud director host', type=str)
-    parser.add_argument('-o', '--vcdorg', help='vcloud director org', type=str)
-    parser.add_argument('-v', '--vcdvdc', help='vcloud director vdc', type=str)
-    parser.add_argument('-d', '--debug', help='debug level', type=int)
-
-    parser_subparsers = parser.add_subparsers(help='commands', dest='command')
-    sub = parser_subparsers.add_parser('list', help='List objects (VMs, vApps, networks)')
-    sub_subparsers = sub.add_subparsers(dest='action')
-
-    list_vms = sub_subparsers.add_parser('vms', help='list - all vm deployed in vCloud director')
-    list_vapps = sub_subparsers.add_parser('vapps', help='list - all vapps deployed in vCloud director')
-    list_network = sub_subparsers.add_parser('networks', help='list - all networks deployed')
-    list_network.add_argument('-o', '--osm', default=False, action='store_true', help='provide view in OSM format')
-
-    #list vdc
-    list_vdc = sub_subparsers.add_parser('vdc', help='list - list all vdc for organization accessible to you')
-    list_vdc.add_argument('-o', '--osm', default=False, action='store_true', help='provide view in OSM format')
-
-    list_org = sub_subparsers.add_parser('org', help='list - list of organizations accessible to you.')
-
-    create_sub = parser_subparsers.add_parser('create')
-    create_sub_subparsers = create_sub.add_subparsers(dest='action')
-    create_vms = create_sub_subparsers.add_parser('vms')
-    create_vapp = create_sub_subparsers.add_parser('vapp')
-    create_vapp.add_argument('uuid')
-
-    # add network
-    create_network = create_sub_subparsers.add_parser('network')
-    create_network.add_argument('network_name', action='store', help='create a network for a vdc')
-
-    # add VDC
-    create_vdc = create_sub_subparsers.add_parser('vdc')
-    create_vdc.add_argument('vdc_name', action='store', help='create a new VDC for org')
-
-    delete_sub = parser_subparsers.add_parser('delete')
-    del_sub_subparsers = delete_sub.add_subparsers(dest='action')
-    del_vms = del_sub_subparsers.add_parser('vms')
-    del_vapp = del_sub_subparsers.add_parser('vapp')
-    del_vapp.add_argument('uuid', help='view vapp based on UUID')
-
-    # delete network
-    del_network = del_sub_subparsers.add_parser('network')
-    del_network.add_argument('network_name', action='store',
-                             help='- delete network for vcloud director by provided name')
-    del_network.add_argument('-u', '--uuid', default=False, action='store_true',
-                             help='delete network for vcloud director by provided uuid')
-
-    # delete vdc
-    del_vdc = del_sub_subparsers.add_parser('vdc')
-
-    view_sub = parser_subparsers.add_parser('view')
-    view_sub_subparsers = view_sub.add_subparsers(dest='action')
-
-    view_vms_parser = view_sub_subparsers.add_parser('vms')
-    view_vms_parser.add_argument('uuid', default=False, action='store_true',
-                                 help='- View VM for specific uuid in vcloud director')
-    view_vms_parser.add_argument('name', default=False, action='store_true',
-                                 help='- View VM for specific vapp name in vcloud director')
-
-    # view vapp
-    view_vapp_parser = view_sub_subparsers.add_parser('vapp')
-    view_vapp_parser.add_argument('vapp_name', action='store',
-                                  help='- view vapp for specific vapp name in vcloud director')
-    view_vapp_parser.add_argument('-u', '--uuid', default=False, action='store_true', help='view vapp based on uuid')
-    view_vapp_parser.add_argument('-o', '--osm', default=False, action='store_true',  help='provide view in OSM format')
-
-    # view network
-    view_network = view_sub_subparsers.add_parser('network')
-    view_network.add_argument('network_name', action='store',
-                              help='- view network for specific network name in vcloud director')
-    view_network.add_argument('-u', '--uuid', default=False, action='store_true', help='view network based on uuid')
-
-    # view VDC command and actions
-    view_vdc = view_sub_subparsers.add_parser('vdc')
-    view_vdc.add_argument('vdc_name', action='store',
-                          help='- View VDC based and action based on provided vdc uuid')
-    view_vdc.add_argument('-u', '--uuid', default=False, action='store_true', help='view vdc based on uuid')
-
-    # view organization command and actions
-    view_org = view_sub_subparsers.add_parser('org')
-    view_org.add_argument('org_name', action='store',
-                          help='- View VDC based and action based on provided vdc uuid')
-    view_org.add_argument('-u', '--uuid', default=False, action='store_true', help='view org based on uuid')
-
-    # upload image action
-    image_sub = parser_subparsers.add_parser('image')
-    image_subparsers = image_sub.add_subparsers(dest='action')
-    upload_parser = image_subparsers.add_parser('upload')
-    upload_parser.add_argument('image', default=False, action='store', help='- valid path to OVF image ')
-    upload_parser.add_argument('catalog', default=False, action='store_true', help='- catalog name')
-
-    # boot vm action
-    boot_parser = image_subparsers.add_parser('boot')
-    boot_parser.add_argument('image', default=False, action='store', help='- Image name')
-    boot_parser.add_argument('vmname', default=False, action='store', help='- VM name')
-    boot_parser.add_argument('-u', '--uuid', default=False, action='store_true', help='view org based on uuid')
-
-    namespace = parser.parse_args()
-    # put command_line args to mapping
-    command_line_args = {k: v for k, v in vars(namespace).items() if v}
-
-    d = defaults.copy()
-    d.update(os.environ)
-    d.update(command_line_args)
-
-    logger = logging.getLogger('mano.vim.vmware')
-    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
-    ch = logging.StreamHandler()
-    ch.setLevel(str.upper(d['debug']))
-    ch.setFormatter(formatter)
-    logger.addHandler(ch)
-    logger.setLevel(getattr(logging, str.upper(d['debug'])))
-    logger.info(
-        "Connecting {} username: {} org: {} vdc: {} ".format(d['vcdhost'], d['vcduser'], d['vcdorg'], d['vcdvdc']))
-
-    logger.debug("command: \"{}\" actio: \"{}\"".format(d['command'], d['action']))
-
-    # main entry point.
-    vmwarecli(namespace=namespace)
diff --git a/osm_ro/wim/__init__.py b/osm_ro/wim/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/osm_ro/wim/actions.py b/osm_ro/wim/actions.py
deleted file mode 100644 (file)
index f224460..0000000
+++ /dev/null
@@ -1,423 +0,0 @@
-# -*- coding: utf-8 -*-
-##
-# Copyright 2018 University of Bristol - High Performance Networks Research
-# Group
-# All Rights Reserved.
-#
-# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
-# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: <highperformance-networks@bristol.ac.uk>
-#
-# Neither the name of the University of Bristol nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# This work has been performed in the context of DCMS UK 5G Testbeds
-# & Trials Programme and in the framework of the Metro-Haul project -
-# funded by the European Commission under Grant number 761727 through the
-# Horizon 2020 and 5G-PPP programmes.
-##
-# pylint: disable=E1101,E0203,W0201
-
-"""Common logic for task management"""
-import logging
-from time import time
-from types import StringTypes
-
-from six.moves import range
-
-import yaml
-
-from ..utils import (
-    filter_dict_keys,
-    filter_out_dict_keys,
-    merge_dicts,
-    remove_none_items,
-    truncate
-)
-
-PENDING, REFRESH, IGNORE = range(3)
-
-TIMEOUT = 1 * 60 * 60  # 1 hour
-MIN_ATTEMPTS = 10
-
-
-class Action(object):
-    """Create a basic object representing the action record.
-
-    Arguments:
-        record (dict): record as returned by the database
-        **kwargs: extra keyword arguments to overwrite the fields in record
-    """
-
-    PROPERTIES = [
-        'task_index',          # MD - Index number of the task.
-                               #      This together with the instance_action_id
-                               #      forms a unique key identifier
-        'action',              # MD - CREATE, DELETE, FIND
-        'item',                # MD - table name, eg. instance_wim_nets
-        'item_id',             # MD - uuid of the referenced entry in the
-                               #      previous table
-        'instance_action_id',  # MD - reference to a cohesive group of actions
-                               #      related to the same instance-scenario
-        'wim_account_id',      # MD - reference to the WIM account used
-                               #      by the thread/connector
-        'wim_internal_id',     # MD - internal ID used by the WIM to refer to
-                               #      the item
-        'datacenter_vim_id',   # MD - reference to the VIM account used
-                               #      by the thread/connector
-        'vim_id',              # MD - internal ID used by the VIM to refer to
-                               #      the item
-        'status',              # MD - SCHEDULED,BUILD,DONE,FAILED,SUPERSEDED
-        'extra',               # MD - text with yaml format at database,
-        #                             dict at memory with:
-        # `- params:     list with the params to be sent to the VIM for CREATE
-        #                or FIND. For DELETE the vim_id is taken from other
-        #                related tasks
-        # `- find:       (only for CREATE tasks) if present it should FIND
-        #                before creating and use if existing.
-        #                Contains the FIND params
-        # `- depends_on: list with the 'task_index'es of tasks that must be
-        #                completed before. e.g. a vm creation depends on a net
-        #                creation
-        # `- sdn_net_id: used for net.
-        # `- tries
-        # `- created_items:
-        #                dictionary with extra elements created that need
-        #                to be deleted. e.g. ports,
-        # `- volumes,...
-        # `- created:    False if the VIM element is not created by
-        #                other actions, and it should not be deleted
-        # `- wim_status: WIM status of the element. Stored also at database
-        #                in the item table
-        'params',              # M  - similar to extra[params]
-        'depends_on',          # M  - similar to extra[depends_on]
-        'depends',             # M  - dict with task_index(from depends_on) to
-                               #      task class
-        'error_msg',           # MD - descriptive text upon an error
-        'created_at',          # MD - task DB creation time
-        'modified_at',         # MD - last DB update time
-        'process_at',          # M  - unix epoch when to process the task
-    ]
-
-    __slots__ = PROPERTIES + [
-        'logger',
-    ]
-
-    def __init__(self, record, logger=None, **kwargs):
-        self.logger = logger or logging.getLogger('openmano.wim.action')
-        attrs = merge_dicts(dict.fromkeys(self.PROPERTIES), record, kwargs)
-        self.update(_expand_extra(attrs))
-
-    def __repr__(self):
-        return super(Action, self).__repr__() + repr(self.as_dict())
-
-    def as_dict(self, *fields):
-        """Representation of the object as a dict"""
-        attrs = (set(self.PROPERTIES) & set(fields)
-                 if fields else self.PROPERTIES)
-        return {k: getattr(self, k) for k in attrs}
-
-    def as_record(self):
-        """Returns a dict that can be send to the persistence layer"""
-        special = ['params', 'depends_on', 'depends']
-        record = self.as_dict()
-        record['extra'].update(self.as_dict(*special))
-        non_fields = special + ['process_at']
-
-        return remove_none_items(filter_out_dict_keys(record, non_fields))
-
-    def update(self, values=None, **kwargs):
-        """Update the in-memory representation of the task (works similarly to
-        dict.update). The update is NOT automatically persisted.
-        """
-        # "white-listed mass assignment"
-        updates = merge_dicts(values, kwargs)
-        for attr in set(self.PROPERTIES) & set(updates.keys()):
-            setattr(self, attr, updates[attr])
-
-    def save(self, persistence, **kwargs):
-        """Persist current state of the object to the database.
-
-        Arguments:
-            persistence: object encapsulating the database
-            **kwargs: extra properties to be updated before saving
-
-        Note:
-            If any key word argument is passed, the object itself will be
-            changed as an extra side-effect.
-        """
-        action_id = self.instance_action_id
-        index = self.task_index
-        if kwargs:
-            self.update(kwargs)
-        properties = self.as_record()
-
-        return persistence.update_action(action_id, index, properties)
-
-    def fail(self, persistence, reason, status='FAILED'):
-        """Mark action as FAILED, updating tables accordingly"""
-        persistence.update_instance_action_counters(
-            self.instance_action_id,
-            failed=1,
-            done=(-1 if self.status == 'DONE' else 0))
-
-        self.status = status
-        self.error_msg = truncate(reason)
-        self.logger.error('%s %s: %s', self.id, status, reason)
-        return self.save(persistence)
-
-    def succeed(self, persistence, status='DONE'):
-        """Mark action as DONE, updating tables accordingly"""
-        persistence.update_instance_action_counters(
-            self.instance_action_id, done=1)
-        self.status = status
-        self.logger.debug('%s %s', self.id, status)
-        return self.save(persistence)
-
-    def defer(self, persistence, reason,
-              timeout=TIMEOUT, min_attempts=MIN_ATTEMPTS):
-        """Postpone the task processing, taking care to not timeout.
-
-        Arguments:
-            persistence: object encapsulating the database
-            reason (str): explanation for the delay
-            timeout (int): maximum delay tolerated since the first attempt.
-                Note that this number is a time delta, in seconds
-            min_attempts (int): Number of attempts to try before giving up.
-        """
-        now = time()
-        last_attempt = self.extra.get('last_attempted_at') or time()
-        attempts = self.extra.get('attempts') or 0
-
-        if last_attempt - now > timeout and attempts > min_attempts:
-            self.fail(persistence,
-                      'Timeout reached. {} attempts in the last {:d} min'
-                      .format(attempts, last_attempt / 60))
-
-        self.extra['last_attempted_at'] = time()
-        self.extra['attempts'] = attempts + 1
-        self.logger.info('%s DEFERRED: %s', self.id, reason)
-        return self.save(persistence)
-
-    @property
-    def group_key(self):
-        """Key defining the group to which this tasks belongs"""
-        return (self.item, self.item_id)
-
-    @property
-    def processing(self):
-        """Processing status for the task (PENDING, REFRESH, IGNORE)"""
-        if self.status == 'SCHEDULED':
-            return PENDING
-
-        return IGNORE
-
-    @property
-    def id(self):
-        """Unique identifier of this particular action"""
-        return '{}[{}]'.format(self.instance_action_id, self.task_index)
-
-    @property
-    def is_scheduled(self):
-        return self.status == 'SCHEDULED'
-
-    @property
-    def is_build(self):
-        return self.status == 'BUILD'
-
-    @property
-    def is_done(self):
-        return self.status == 'DONE'
-
-    @property
-    def is_failed(self):
-        return self.status == 'FAILED'
-
-    @property
-    def is_superseded(self):
-        return self.status == 'SUPERSEDED'
-
-    def refresh(self, connector, persistence):
-        """Use the connector/persistence to refresh the status of the item.
-
-        After the item status is refreshed any change in the task should be
-        persisted to the database.
-
-        Arguments:
-            connector: object containing the classes to access the WIM or VIM
-            persistence: object containing the methods necessary to query the
-                database and to persist the updates
-        """
-        self.logger.debug(
-            'Action `%s` has no refresh to be done',
-            self.__class__.__name__)
-
-    def expand_dependency_links(self, task_group):
-        """Expand task indexes into actual IDs"""
-        if not self.depends_on or (
-                isinstance(self.depends, dict) and self.depends):
-            return
-
-        num_tasks = len(task_group)
-        references = {
-            "TASK-{}".format(i): task_group[i]
-            for i in self.depends_on
-            if i < num_tasks and task_group[i].task_index == i and
-            task_group[i].instance_action_id == self.instance_action_id
-        }
-        self.depends = references
-
-    def become_superseded(self, superseding):
-        """When another action tries to supersede this one,
-        we need to change both of them, so the surviving actions will be
-        logic consistent.
-
-        This method should do the required internal changes, and also
-        suggest changes for the other, superseding, action.
-
-        Arguments:
-            superseding: other task superseding this one
-
-        Returns:
-            dict: changes suggested to the action superseding this one.
-                  A special key ``superseding_needed`` is used to
-                  suggest if the superseding is actually required or not.
-                  If not present, ``superseding_needed`` is assumed to
-                  be False.
-        """
-        self.status = 'SUPERSEDED'
-        self.logger.debug(
-            'Action `%s` was superseded by `%s`',
-            self.__class__.__name__, superseding.__class__.__name__)
-        return {}
-
-    def supersede(self, others):
-        """Supersede other tasks, if necessary
-
-        Arguments:
-            others (list): action objects being superseded
-
-        When the task decide to supersede others, this method should call
-        ``become_superseded`` on the other actions, collect the suggested
-        updates and perform the necessary changes
-        """
-        # By default actions don't supersede others
-        self.logger.debug(
-            'Action `%s` does not supersede other actions',
-            self.__class__.__name__)
-
-    def process(self, connector, persistence, ovim):
-        """Abstract method, that needs to be implemented.
-        Process the current task.
-
-        Arguments:
-            connector: object with API for accessing the WAN
-                Infrastructure Manager system
-            persistence: abstraction layer for the database
-            ovim: instance of openvim, abstraction layer that enable
-                SDN-related operations
-        """
-        raise NotImplementedError
-
-
class FindAction(Action):
    """Abstract base for FIND actions; subclasses specialize per item type."""

    @property
    def processing(self):
        # Items already found (DONE/BUILD) still need a periodic refresh
        if self.status in ('DONE', 'BUILD'):
            return REFRESH
        return super(FindAction, self).processing

    def become_superseded(self, superseding):
        super(FindAction, self).become_superseded(superseding)
        # Hand over the identifiers already discovered, dropping unknowns
        hints = {field: getattr(self, field)
                 for field in ('vim_id', 'wim_internal_id')}
        return remove_none_items(hints)
-
-
class CreateAction(Action):
    """Abstract base for CREATE actions; subclasses specialize per item type."""

    @property
    def processing(self):
        # Items being created (DONE/BUILD) still need a periodic refresh
        if self.status in ('DONE', 'BUILD'):
            return REFRESH
        return super(CreateAction, self).processing

    def become_superseded(self, superseding):
        super(CreateAction, self).become_superseded(superseding)

        was_created = self.extra.get('created', True)
        sdn_net_id = self.extra.get('sdn_net_id')
        pending_info = self.wim_internal_id or self.vim_id or sdn_net_id
        # Only suggest a superseding when something was actually created
        # and there is remote state left to clean up
        if not was_created or not pending_info:
            return {}

        extra_info = filter_dict_keys(
            self.extra or {},
            ('sdn_net_id', 'interfaces', 'created_items'))

        return {'superseding_needed': True,
                'wim_internal_id': self.wim_internal_id,
                'vim_id': self.vim_id,
                'extra': remove_none_items(extra_info)}
-
-
class DeleteAction(Action):
    """Abstract base for DELETE actions; subclasses specialize per item type."""

    def supersede(self, others):
        self.logger.debug('%s %s %s %s might supersede other actions',
                          self.id, self.action, self.item, self.item_id)
        # First collect all the changes suggested by the superseded tasks
        changes = [other.become_superseded(self) for other in others]
        needed = any(change.pop('superseding_needed', False)
                     for change in changes)

        # Merge the nested structures before the flat attributes
        extras = [change.pop('extra', None) or {} for change in changes]
        items = [extra.pop('created_items', None) or {} for extra in extras]
        items = merge_dicts(self.extra.get('created_items', {}), *items)
        self.extra = merge_dicts(self.extra, {'created_items': items}, *extras)

        # Accept the remaining suggested updates, but only for known fields
        merged = merge_dicts(*changes)
        for attr, value in merged.items():
            if attr in self.PROPERTIES:
                setattr(self, attr, value)

        # Reevaluate whether the delete itself is still needed
        if not needed:
            self.status = 'SUPERSEDED'
-
-
-def _expand_extra(record):
-    extra = record.pop('extra', None) or {}
-    if isinstance(extra, StringTypes):
-        extra = yaml.safe_load(extra)
-
-    record['params'] = extra.get('params')
-    record['depends_on'] = extra.get('depends_on', [])
-    record['depends'] = extra.get('depends', None)
-    record['extra'] = extra
-
-    return record
diff --git a/osm_ro/wim/engine.py b/osm_ro/wim/engine.py
deleted file mode 100644 (file)
index 3fdd032..0000000
+++ /dev/null
@@ -1,534 +0,0 @@
-# -*- coding: utf-8 -*-
-##
-# Copyright 2018 University of Bristol - High Performance Networks Research
-# Group
-# All Rights Reserved.
-#
-# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
-# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: <highperformance-networks@bristol.ac.uk>
-#
-# Neither the name of the University of Bristol nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# This work has been performed in the context of DCMS UK 5G Testbeds
-# & Trials Programme and in the framework of the Metro-Haul project -
-# funded by the European Commission under Grant number 761727 through the
-# Horizon 2020 and 5G-PPP programmes.
-##
-
-"""This module contains the domain logic, and the implementation of the
-required steps to perform VNF management and orchestration in a WAN
-environment.
-
-It works as an extension/complement to the main functions contained in the
-``nfvo.py`` file and avoids interacting directly with the database, by relying
-on the `persistence` module.
-
-No http request handling/direct interaction with the database should be present
-in this file.
-"""
-import json
-import logging
-from contextlib import contextmanager
-from itertools import groupby
-from operator import itemgetter
-from sys import exc_info
-from uuid import uuid4
-
-from six import reraise
-
-from ..utils import remove_none_items
-from .actions import Action
-from .errors import (
-    DbBaseException,
-    NoWimConnectedToDatacenters,
-    UnexpectedDatabaseError,
-    WimAccountNotActive
-)
-from .wim_thread import WimThread
-
-
class WimEngine(object):
    """Logic supporting the establishment of WAN links when a NS spans across
    different datacenters.
    """
    def __init__(self, persistence, logger=None, ovim=None):
        # persistence: DB abstraction layer (see the wim persistence module)
        self.persist = persistence
        self.logger = logger or logging.getLogger('openmano.wim.engine')
        # Maps wim_account uuid -> WimThread processing its actions
        self.threads = {}
        self.connectors = {}
        self.ovim = ovim

    def create_wim(self, properties):
        """Create a new wim record according to the properties.

        Please check the wim schema to have more information about
        ``properties``.

        The ``config`` property might contain a ``wim_port_mapping`` dict.
        In this case, the method ``create_wim_port_mappings`` will be
        automatically invoked.

        Returns:
            str: uuid of the newly created WIM record
        """
        port_mapping = ((properties.get('config', {}) or {})
                        .pop('wim_port_mapping', {}))
        uuid = self.persist.create_wim(properties)

        if port_mapping:
            try:
                self.create_wim_port_mappings(uuid, port_mapping)
            except DbBaseException:
                # Rollback, so we don't leave a half-created WIM behind
                self.delete_wim(uuid)
                # BUGFIX: the two literal fragments used to concatenate
                # without a separator ("...mappingsRolling back...")
                ex = UnexpectedDatabaseError(
                    'Failed to create port mappings. '
                    'Rolling back wim creation')
                self.logger.exception(str(ex))
                reraise(ex.__class__, ex, exc_info()[2])

        return uuid

    def get_wim(self, uuid_or_name, tenant_id=None):
        """Retrieve existing WIM record by name or id.

        If ``tenant_id`` is specified, the query will be
        limited to the WIM associated to the given tenant.
        """
        # Since it is a pure DB operation, we can delegate it directly
        return self.persist.get_wim(uuid_or_name, tenant_id)

    def update_wim(self, uuid_or_name, properties):
        """Edit an existing WIM record.

        ``properties`` is a dictionary with the properties being changed;
        if a property is not present, the old value will be preserved.

        Similarly to create_wim, the ``config`` property might contain a
        ``wim_port_mapping`` dict.  In this case, port mappings will be
        automatically updated.
        """
        port_mapping = ((properties.get('config', {}) or {})
                        .pop('wim_port_mapping', {}))
        orig_props = self.persist.get_by_name_or_uuid('wims', uuid_or_name)
        uuid = orig_props['uuid']

        response = self.persist.update_wim(uuid, properties)

        if port_mapping:
            try:
                # It is very complex to diff and update individually all the
                # port mappings. Therefore a practical approach is just delete
                # and create it again.
                self.persist.delete_wim_port_mappings(uuid)
                # ^  Calling from persistence avoid reloading twice the thread
                self.create_wim_port_mappings(uuid, port_mapping)
            except DbBaseException:
                # Rollback to the previous properties
                self.update_wim(uuid_or_name, orig_props)
                # BUGFIX: add separator between the two message fragments
                ex = UnexpectedDatabaseError(
                    'Failed to update port mappings. '
                    'Rolling back wim updates')
                self.logger.exception(str(ex))
                reraise(ex.__class__, ex, exc_info()[2])

        return response

    def delete_wim(self, uuid_or_name):
        """Kill the corresponding wim threads and erase the WIM record."""
        # Theoretically, we can rely on the database to drop the wim_accounts
        # automatically, since we have configured 'ON CASCADE DELETE'.
        # However, we use `delete_wim_accounts` to kill all the running
        # threads.
        self.delete_wim_accounts(uuid_or_name)
        return self.persist.delete_wim(uuid_or_name)

    def create_wim_account(self, wim, tenant, properties):
        """Create an account that associates a tenant to a WIM.

        As a side effect this function will spawn a new thread.

        Arguments:
            wim (str): name or uuid of the WIM related to the account being
                created
            tenant (str): name or uuid of the nfvo tenant to which the account
                will be created
            properties (dict): properties of the account
                (eg. username, password, ...)

        Returns:
            dict: Created record
        """
        uuid = self.persist.create_wim_account(wim, tenant, properties)
        account = self.persist.get_wim_account_by(uuid=uuid)
        # ^  We need to use get_wim_account_by here, since this method returns
        #    all the associations, and we need the wim to create the thread
        self._spawn_thread(account)
        return account

    def _update_single_wim_account(self, account, properties):
        """Update a WIM Account, taking care to reload the corresponding
        thread.

        Arguments:
            account (dict): Current account record
            properties (dict): Properties to be updated

        Returns:
            dict: updated record
        """
        account = self.persist.update_wim_account(account['uuid'], properties)
        self.threads[account['uuid']].reload()
        return account

    def update_wim_accounts(self, wim, tenant, properties):
        """Update all the accounts related to a WIM and a tenant,
        taking care of reloading threads.

        Arguments:
            wim (str): uuid or name of a WIM record
            tenant (str): uuid or name of a NFVO tenant record
            properties (dict): attributes with values to be updated

        Returns:
            list: Records that were updated
        """
        accounts = self.persist.get_wim_accounts_by(wim, tenant)
        return [self._update_single_wim_account(account, properties)
                for account in accounts]

    def _delete_single_wim_account(self, account):
        """Delete a WIM Account, taking care to remove the corresponding
        thread and delete the internal WIM account, if it was automatically
        generated.

        Arguments:
            account (dict): Current account record

        Returns:
            dict: current record (same as input)
        """
        self.persist.delete_wim_account(account['uuid'])

        if account['uuid'] not in self.threads:
            raise WimAccountNotActive(
                'Requests send to the WIM Account %s are not currently '
                'being processed.', account['uuid'])
        else:
            self.threads[account['uuid']].exit()
            del self.threads[account['uuid']]

        return account

    def delete_wim_accounts(self, wim, tenant=None, **kwargs):
        """Delete all the accounts related to a WIM (and a tenant),
        taking care of threads and internal WIM accounts.

        Arguments:
            wim (str): uuid or name of a WIM record
            tenant (str): uuid or name of a NFVO tenant record

        Returns:
            list: Records that were deleted
        """
        kwargs.setdefault('error_if_none', False)
        accounts = self.persist.get_wim_accounts_by(wim, tenant, **kwargs)
        return [self._delete_single_wim_account(a) for a in accounts]

    def _reload_wim_threads(self, wim_id):
        # Reload every thread whose account belongs to the given WIM
        for thread in self.threads.values():
            if thread.wim_account['wim_id'] == wim_id:
                thread.reload()

    def create_wim_port_mappings(self, wim, properties, tenant=None):
        """Store information about port mappings in the Database."""
        # TODO: Review tenants... WIMs can exist across different tenants,
        #       and the port_mappings are a WIM property, not a wim_account
        #       property, so the concepts are not related
        wim = self.persist.get_by_name_or_uuid('wims', wim)
        result = self.persist.create_wim_port_mappings(wim, properties, tenant)
        self._reload_wim_threads(wim['uuid'])
        return result

    def get_wim_port_mappings(self, wim):
        """Retrieve information about port mappings from the Database."""
        return self.persist.get_wim_port_mappings(wim)

    def delete_wim_port_mappings(self, wim):
        """Erase the port mapping records associated with the WIM."""
        wim = self.persist.get_by_name_or_uuid('wims', wim)
        message = self.persist.delete_wim_port_mappings(wim['uuid'])
        self._reload_wim_threads(wim['uuid'])
        return message

    def find_common_wims(self, datacenter_ids, tenant):
        """Find WIMs that are common to all datacenters listed."""
        mappings = self.persist.get_wim_port_mappings(
            datacenter=datacenter_ids, tenant=tenant, error_if_none=False)

        wim_id_of = itemgetter('wim_id')
        sorted_mappings = sorted(mappings, key=wim_id_of)  # needed by groupby
        grouped_mappings = groupby(sorted_mappings, key=wim_id_of)
        # wim_id -> list of datacenters it can reach
        mapped_datacenters = {
            wim_id: [mapping['datacenter_id'] for mapping in group]
            for wim_id, group in grouped_mappings
        }

        return [
            wim_id
            for wim_id, connected_datacenters in mapped_datacenters.items()
            if set(connected_datacenters) >= set(datacenter_ids)
        ]

    def find_common_wim(self, datacenter_ids, tenant):
        """Find a single WIM that is able to connect all the datacenters
        listed.

        Raises:
            NoWimConnectedToDatacenters: if no WIM connected to all datacenters
                at once is found
        """
        suitable_wim_ids = self.find_common_wims(datacenter_ids, tenant)

        if not suitable_wim_ids:
            raise NoWimConnectedToDatacenters(datacenter_ids)

        # TODO: use a criteria to determine which WIM is going to be used,
        #       instead of always using the first one (strategy pattern can be
        #       used here)
        return suitable_wim_ids[0]

    def find_suitable_wim_account(self, datacenter_ids, tenant):
        """Find a WIM account that is able to connect all the datacenters
        listed.

        Arguments:
            datacenter_ids (list): List of UUIDs of all the datacenters (vims)
                that need to be connected.
            tenant (str): UUID of the OSM tenant

        Returns:
            object with the WIM account that is able to connect all the
                datacenters.
        """
        wim_id = self.find_common_wim(datacenter_ids, tenant)
        return self.persist.get_wim_account_by(wim_id, tenant)

    def derive_wan_link(self,
                        wim_usage,
                        instance_scenario_id, sce_net_id,
                        networks, tenant, related=None):
        """Create an instance_wim_nets record for the given information."""
        if sce_net_id in wim_usage:
            # The caller pinned a specific WIM account for this VLD
            account_id = wim_usage[sce_net_id]
            account = self.persist.get_wim_account_by(uuid=account_id)
            wim_id = account['wim_id']
        else:
            datacenters = [n['datacenter_id'] for n in networks]
            wim_id = self.find_common_wim(datacenters, tenant)
            account = self.persist.get_wim_account_by(wim_id, tenant)

        return {
            'uuid': str(uuid4()),
            'instance_scenario_id': instance_scenario_id,
            'sce_net_id': sce_net_id,
            'wim_id': wim_id,
            'wim_account_id': account['uuid'],
            'related': related
        }

    def derive_wan_links(self, wim_usage, networks, tenant=None):
        """Discover and return the wan_links that have to be created
        considering a set of networks (VLDs) required for a scenario instance
        (NSR).

        Arguments:
            wim_usage(dict): Mapping between sce_net_id and wim_id. If wim_id
                is False, means not to create wan_links
            networks(list): Dicts containing the information about the networks
                that will be instantiated to materialize a Network Service
                (scenario) instance.
                Corresponding to the ``instance_net`` record.

        Returns:
            list: list of WAN links to be written to the database
        """
        # Group networks by key=(instance_scenario_id, sce_net_id)
        related = None
        if networks:
            related = networks[0].get("related")
        filtered = _filter_multi_vim(networks)
        grouped_networks = _group_networks(filtered)
        datacenters_per_group = _count_datacenters(grouped_networks)
        # For each group count the number of datacenters. If greater than 1,
        # we have to create a wan link connecting them.
        wan_groups = [key
                      for key, counter in datacenters_per_group
                      if counter > 1]
        # Keys are tuples(instance_scenario_id, sce_net_id)
        return [
            self.derive_wan_link(wim_usage,
                                 key[0], key[1], grouped_networks[key],
                                 tenant, related)
            for key in wan_groups if wim_usage.get(key[1]) is not False
        ]

    def create_action(self, wan_link):
        """For a single wan_link create the corresponding CREATE action."""
        return {
            'action': 'CREATE',
            'status': 'SCHEDULED',
            'item': 'instance_wim_nets',
            'item_id': wan_link['uuid'],
            'wim_account_id': wan_link['wim_account_id']
        }

    def create_actions(self, wan_links):
        """For an array of wan_links, create all the corresponding actions."""
        return [self.create_action(link) for link in wan_links]

    def delete_action(self, wan_link):
        """For a single wan_link create the corresponding DELETE action."""
        return {
            'action': 'DELETE',
            'status': 'SCHEDULED',
            'item': 'instance_wim_nets',
            'item_id': wan_link['uuid'],
            'wim_account_id': wan_link['wim_account_id'],
            'extra': json.dumps({'wan_link': wan_link})
            # We serialize and cache the wan_link here, because it can be
            # deleted during the delete process
        }

    def delete_actions(self, wan_links=(), instance_scenario_id=None):
        """Given an Instance Scenario, remove all the WAN Links created in the
        past."""
        if instance_scenario_id:
            wan_links = self.persist.get_wan_links(
                instance_scenario_id=instance_scenario_id)
        return [self.delete_action(link) for link in wan_links]

    def incorporate_actions(self, wim_actions, instance_action):
        """Make the instance action consider new WIM actions and make the WIM
        actions aware of the instance action.
        """
        current = instance_action.setdefault('number_tasks', 0)
        for i, action in enumerate(wim_actions):
            action['task_index'] = current + i
            action['instance_action_id'] = instance_action['uuid']
        instance_action['number_tasks'] += len(wim_actions)

        return wim_actions, instance_action

    def dispatch(self, tasks):
        """Enqueue a list of tasks for further processing.

        This function is supposed to be called from outside the WIM Thread.
        """
        for task in tasks:
            if task['wim_account_id'] not in self.threads:
                error_msg = str(WimAccountNotActive(
                    'Requests send to the WIM Account %s are not currently '
                    'being processed.', task['wim_account_id']))
                Action(task, self.logger).fail(self.persist, error_msg)
                self.persist.update_wan_link(task['item_id'],
                                             {'status': 'ERROR',
                                              'error_msg': error_msg})
                # BUGFIX: tasks carry 'instance_action_id', not
                # 'instance_account_id' (the old key raised KeyError here)
                self.logger.error('Task %s %s %s not dispatched.\n%s',
                                  task['action'], task['item'],
                                  task['instance_action_id'], error_msg)
            else:
                self.threads[task['wim_account_id']].insert_task(task)
                self.logger.debug('Task %s %s %s dispatched',
                                  task['action'], task['item'],
                                  task['instance_action_id'])

    def _spawn_thread(self, wim_account):
        """Spawn a WIM thread.

        Arguments:
            wim_account (dict): WIM information (usually persisted)
                The `wim` field is required to be set with a valid WIM record
                inside the `wim_account` dict

        Return:
            threading.Thread: Thread object
        """
        thread = None
        # Broad catch is deliberate: a failure spawning one thread must not
        # prevent the remaining accounts from starting (but never swallow
        # KeyboardInterrupt/SystemExit, hence Exception, not bare except)
        try:
            thread = WimThread(self.persist, wim_account, ovim=self.ovim)
            self.threads[wim_account['uuid']] = thread
            thread.start()
        except Exception:
            self.logger.error('Error when spawning WIM thread for %s',
                              wim_account['uuid'], exc_info=True)

        return thread

    def start_threads(self):
        """Start the threads responsible for processing WIM Actions."""
        accounts = self.persist.get_wim_accounts(error_if_none=False)
        self.threads = remove_none_items(
            {a['uuid']: self._spawn_thread(a) for a in accounts})

    def stop_threads(self):
        """Stop the threads responsible for processing WIM Actions."""
        # BUGFIX: iterate over a snapshot of the keys -- deleting entries
        # while iterating `.items()` raises RuntimeError on Python 3
        for uuid in list(self.threads):
            self.threads[uuid].exit()
            del self.threads[uuid]

    @contextmanager
    def threads_running(self):
        """Ensure no thread will be left running."""
        # This method is particularly important for testing :)
        try:
            self.start_threads()
            yield
        finally:
            self.stop_threads()
-
-
-def _filter_multi_vim(networks):
-    """Ignore networks without sce_net_id (all VNFs go to the same VIM)"""
-    return [n for n in networks if 'sce_net_id' in n and n['sce_net_id']]
-
-
-def _group_networks(networks):
-    """Group networks that correspond to the same instance_scenario_id and
-    sce_net_id (NSR and VLD).
-
-    Arguments:
-        networks(list): Dicts containing the information about the networks
-            that will be instantiated to materialize a Network Service
-            (scenario) instance.
-    Returns:
-        dict: Keys are tuples (instance_scenario_id, sce_net_id) and values
-            are list of networks.
-    """
-    criteria = itemgetter('instance_scenario_id', 'sce_net_id')
-
-    networks = sorted(networks, key=criteria)
-    return {k: list(v) for k, v in groupby(networks, key=criteria)}
-
-
-def _count_datacenters(grouped_networks):
-    """Count the number of datacenters in each group of networks
-
-    Returns:
-        list of tuples: the first element is the group key, while the second
-            element is the number of datacenters in each group.
-    """
-    return ((key, len(set(n['datacenter_id'] for n in group)))
-            for key, group in grouped_networks.items())
diff --git a/osm_ro/wim/errors.py b/osm_ro/wim/errors.py
deleted file mode 100644 (file)
index ca8c2b7..0000000
+++ /dev/null
@@ -1,189 +0,0 @@
-# -*- coding: utf-8 -*-
-##
-# Copyright 2018 University of Bristol - High Performance Networks Research
-# Group
-# All Rights Reserved.
-#
-# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
-# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: <highperformance-networks@bristol.ac.uk>
-#
-# Neither the name of the University of Bristol nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# This work has been performed in the context of DCMS UK 5G Testbeds
-# & Trials Programme and in the framework of the Metro-Haul project -
-# funded by the European Commission under Grant number 761727 through the
-# Horizon 2020 and 5G-PPP programmes.
-##
-from six.moves import queue
-
-from ..db_base import db_base_Exception as DbBaseException
-from ..http_tools.errors import (
-    Bad_Request,
-    Conflict,
-    HttpMappedError,
-    Internal_Server_Error,
-    Not_Found
-)
-
-
class NoRecordFound(DbBaseException):
    """No record was found in the database"""
    # NOTE: the docstring above is interpolated into the user-visible
    # error message -- keep it stable

    def __init__(self, criteria, table=None):
        prefix = '{} - '.format(table) if table else ''
        message = '{}: {}`{}`'.format(self.__class__.__doc__, prefix, criteria)
        super(NoRecordFound, self).__init__(message, http_code=Not_Found)
-
-
class MultipleRecordsFound(DbBaseException):
    """More than one record was found in the database"""
    # NOTE: the docstring above is interpolated into the user-visible
    # error message -- keep it stable

    def __init__(self, criteria, table=None):
        prefix = '{} - '.format(table) if table else ''
        message = '{}: {}`{}`'.format(self.__class__.__doc__, prefix, criteria)
        super(MultipleRecordsFound, self).__init__(message, http_code=Conflict)
-
-
class WimAndTenantNotAttached(DbBaseException):
    """Wim and Tenant are not attached"""
    # NOTE: the docstring doubles as the message prefix

    def __init__(self, wim, tenant):
        message = '{}: `{}` <> `{}`'.format(
            self.__class__.__doc__, wim, tenant)
        super(WimAndTenantNotAttached, self).__init__(
            message, http_code=Conflict)
-
-
class WimAndTenantAlreadyAttached(DbBaseException):
    """There is already a wim account attaching the given wim and tenant"""
    # NOTE: the docstring doubles as the message prefix

    def __init__(self, wim, tenant):
        message = '{}: `{}` <> `{}`'.format(
            self.__class__.__doc__, wim, tenant)
        super(WimAndTenantAlreadyAttached, self).__init__(
            message, http_code=Conflict)
-
-
# NOTE: the docstring below is reused as the HTTP error message by the
# NoRecordFound constructor -- do not reword it casually
class NoWimConnectedToDatacenters(NoRecordFound):
    """No WIM that is able to connect the given datacenters was found"""
-
-
class InvalidParameters(DbBaseException):
    """The given parameters are invalid"""

    def __init__(self, message, http_code=Bad_Request):
        # Plain passthrough; defaults to HTTP 400 but may be overridden
        super(InvalidParameters, self).__init__(message, http_code)
-
-
class UndefinedAction(HttpMappedError):
    """No action found"""

    def __init__(self, item_type, action, http_code=Internal_Server_Error):
        message = 'The action {} {} is not defined'.format(action, item_type)
        super(UndefinedAction, self).__init__(message, http_code)
-
-
class UndefinedWimConnector(DbBaseException):
    """The connector class for the specified wim type is not implemented"""
    # NOTE: the docstring doubles as the message prefix

    def __init__(self, wim_type, module_name, location_reference):
        # NOTE(review): ``location_reference`` is accepted but not used in
        # the message -- kept for interface compatibility with callers
        message = ('{}: `{}`. Could not find module `{}` '
                   '(check if it is necessary to install a plugin)'
                   .format(self.__class__.__doc__, wim_type, module_name))
        super(UndefinedWimConnector, self).__init__(
            message, http_code=Bad_Request)
-
-
class WimAccountOverwrite(DbBaseException):
    """An attempt to overwrite an existing WIM account was identified"""

    def __init__(self, wim_account, diff=None, tip=None):
        # Build the message from the non-empty parts only, one per line
        parts = [self.__class__.__doc__]
        if wim_account:
            parts.append(
                'Account -- name: {name}, uuid: {uuid}'.format(**wim_account))
        if diff:
            parts.append('Differing fields: ' + ', '.join(diff.keys()))
        if tip:
            parts.append(tip)

        super(WimAccountOverwrite, self).__init__(
            '\n'.join(parts), http_code=Conflict)
-
-
class UnexpectedDatabaseError(DbBaseException):
    """The database didn't raised an exception but also the query was not
    executed (maybe the connection had some problems?)
    """
    # Raised by the engine with an explicit message (e.g. failed port-mapping
    # rollbacks); carries no extra state beyond DbBaseException
-
-
class UndefinedUuidOrName(DbBaseException):
    """Trying to query for a record using an empty uuid or name"""
    # NOTE: the docstring doubles as the message suffix

    def __init__(self, table=None):
        # Tables may be given as 'name AS alias'; keep just the first token
        table_info = '{} - '.format(table.split()[0]) if table else ''
        # BUGFIX: this used to pass ``http_status=Bad_Request``; every other
        # exception in this module passes ``http_code`` to
        # DbBaseException.__init__, so the old keyword raised a TypeError
        # instead of producing the intended 400 response.
        super(UndefinedUuidOrName, self).__init__(
            table_info + self.__class__.__doc__, http_code=Bad_Request)
-
-
class UndefinedWanMappingType(InvalidParameters):
    """The dict wan_service_mapping_info MUST contain a `type` field"""
    # NOTE: the docstring doubles as the message prefix

    def __init__(self, given):
        message = '{}. Given: `{}`'.format(self.__class__.__doc__, given)
        super(UndefinedWanMappingType, self).__init__(message)
-
-
class QueueFull(HttpMappedError, queue.Full):
    """Thread queue is full"""
    # Also subclasses queue.Full so callers may catch either type

    def __init__(self, thread_name, http_code=Internal_Server_Error):
        message = 'Thread {} queue is full'.format(thread_name)
        super(QueueFull, self).__init__(message, http_code)
-
-
class InconsistentState(HttpMappedError):
    """An unexpected inconsistency was found in the state of the program"""

    def __init__(self, arg, http_code=Internal_Server_Error):
        # When wrapping another HttpMappedError, reuse its message and
        # preserve its HTTP code instead of the supplied default
        if isinstance(arg, HttpMappedError):
            super(InconsistentState, self).__init__(str(arg), arg.http_code)
        else:
            super(InconsistentState, self).__init__(arg, http_code)
-
-
class WimAccountNotActive(HttpMappedError, KeyError):
    """WIM Account is not active yet (no thread is running)"""

    def __init__(self, message, http_code=Internal_Server_Error):
        # A hint about the most likely cause is appended to the caller's
        # message (text kept byte-identical -- logs/tests may rely on it)
        full_message = message + (
            '\nThe thread responsible for processing the actions have '
            'suddenly stopped, or have never being spawned')
        super(WimAccountNotActive, self).__init__(full_message, http_code)
-
-
class NoExternalPortFound(HttpMappedError):
    """No external port associated to the instance_net"""
    # NOTE: the docstring doubles as the message prefix

    def __init__(self, instance_net):
        message = '{} uuid({})'.format(
            self.__class__.__doc__, instance_net['uuid'])
        super(NoExternalPortFound, self).__init__(
            message, http_code=Not_Found)
diff --git a/osm_ro/wim/failing_connector.py b/osm_ro/wim/failing_connector.py
deleted file mode 100644 (file)
index b66551c..0000000
+++ /dev/null
@@ -1,82 +0,0 @@
-# -*- coding: utf-8 -*-
-##
-# Copyright 2018 University of Bristol - High Performance Networks Research
-# Group
-# All Rights Reserved.
-#
-# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
-# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: <highperformance-networks@bristol.ac.uk>
-#
-# Neither the name of the University of Bristol nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# This work has been performed in the context of DCMS UK 5G Testbeds
-# & Trials Programme and in the framework of the Metro-Haul project -
-# funded by the European Commission under Grant number 761727 through the
-# Horizon 2020 and 5G-PPP programmes.
-##
-
-"""In the case any error happens when trying to initiate the WIM Connector,
-we need a replacement for it, that will throw an error every time we try to
-execute any action
-"""
-import json
-from .wimconn import WimConnectorError
-
-
-class FailingConnector(object):
-    """Placeholder for a connector whose incitation failed,
-    This place holder will just raise an error every time an action is needed
-    from the connector.
-
-    This way we can make sure that all the other parts of the program will work
-    but the user will have all the information available to fix the problem.
-    """
-    def __init__(self, error_msg):
-        self.error_msg = error_msg
-
-    def check_credentials(self):
-        raise WimConnectorError('Impossible to use WIM:\n' + self.error_msg)
-
-    def get_connectivity_service_status(self, service_uuid, _conn_info=None):
-        raise WimConnectorError('Impossible to retrieve status for {}\n\n{}'
-                                .format(service_uuid, self.error_msg))
-
-    def create_connectivity_service(self, service_uuid, *args, **kwargs):
-        raise WimConnectorError('Impossible to connect {}.\n{}\n{}\n{}'
-                                .format(service_uuid, self.error_msg,
-                                        json.dumps(args, indent=4),
-                                        json.dumps(kwargs, indent=4)))
-
-    def delete_connectivity_service(self, service_uuid, _conn_info=None):
-        raise WimConnectorError('Impossible to disconnect {}\n\n{}'
-                                .format(service_uuid, self.error_msg))
-
-    def edit_connectivity_service(self, service_uuid, *args, **kwargs):
-        raise WimConnectorError('Impossible to change connection {}.\n{}\n'
-                                '{}\n{}'
-                                .format(service_uuid, self.error_msg,
-                                        json.dumps(args, indent=4),
-                                        json.dumps(kwargs, indent=4)))
-
-    def clear_all_connectivity_services(self):
-        raise WimConnectorError('Impossible to use WIM:\n' + self.error_msg)
-
-    def get_all_active_connectivity_services(self):
-        raise WimConnectorError('Impossible to use WIM:\n' + self.error_msg)
diff --git a/osm_ro/wim/http_handler.py b/osm_ro/wim/http_handler.py
deleted file mode 100644 (file)
index b88dab3..0000000
+++ /dev/null
@@ -1,226 +0,0 @@
-# -*- coding: utf-8 -*-
-##
-# Copyright 2018 University of Bristol - High Performance Networks Research
-# Group
-# All Rights Reserved.
-#
-# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
-# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: <highperformance-networks@bristol.ac.uk>
-#
-# Neither the name of the University of Bristol nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# This work has been performed in the context of DCMS UK 5G Testbeds
-# & Trials Programme and in the framework of the Metro-Haul project -
-# funded by the European Commission under Grant number 761727 through the
-# Horizon 2020 and 5G-PPP programmes.
-##
-
-"""This module works as an extension to the toplevel ``httpserver`` module,
-implementing callbacks for the HTTP routes related to the WIM features of OSM.
-
-Acting as a front-end, it is responsible for converting the HTTP request
-payload into native python objects, calling the correct engine methods
-and converting back the response objects into strings to be send in the HTTP
-response payload.
-
-Direct domain/persistence logic should be avoided in this file, instead
-calls to other layers should be done.
-"""
-import logging
-
-from bottle import request
-
-from .. import utils
-from ..http_tools.errors import ErrorHandler
-from ..http_tools.handler import BaseHandler, route
-from ..http_tools.request_processing import (
-    filter_query_string,
-    format_in,
-    format_out
-)
-from .engine import WimEngine
-from .persistence import WimPersistence
-from .schemas import (
-    wim_account_schema,
-    wim_edit_schema,
-    wim_port_mapping_schema,
-    wim_schema
-)
-
-
-class WimHandler(BaseHandler):
-    """HTTP route implementations for WIM related URLs
-
-    Arguments:
-        db: instance of mydb [optional]. This argument must be provided
-            if not ``persistence`` is passed
-        persistence (WimPersistence): High-level data storage abstraction
-            [optional]. If this argument is not present, ``db`` must be.
-        engine (WimEngine): Implementation of the business logic
-            for the engine of WAN networks
-        logger (logging.Logger): logger object [optional]
-        url_base(str): Path fragment to be prepended to the routes [optional]
-        plugins(list): List of bottle plugins to be applied to routes
-            [optional]
-    """
-    def __init__(self, db=None, persistence=None, engine=None,
-                 url_base='', logger=None, plugins=()):
-        self.persist = persistence or WimPersistence(db)
-        self.engine = engine or WimEngine(self.persist)
-        self.url_base = url_base
-        self.logger = logger or logging.getLogger('openmano.wim.http')
-        error_handler = ErrorHandler(self.logger)
-        self.plugins = [error_handler] + list(plugins)
-
-    @route('GET', '/<tenant_id>/wims')
-    def http_list_wims(self, tenant_id):
-        allowed_fields = ('uuid', 'name', 'wim_url', 'type', 'created_at')
-        select_, where_, limit_ = filter_query_string(
-            request.query, None, allowed_fields)
-        # ^  Since we allow the user to customize the db query using the HTTP
-        #    query and it is quite difficult to re-use this query, let's just
-        #    do a ad-hoc call to the db
-
-        from_ = 'wims'
-        if tenant_id != 'any':
-            where_['nfvo_tenant_id'] = tenant_id
-            if 'created_at' in select_:
-                select_[select_.index('created_at')] = (
-                    'w.created_at as created_at')
-            if 'created_at' in where_:
-                where_['w.created_at'] = where_.pop('created_at')
-            from_ = ('wims as w join wim_nfvo_tenants as wt '
-                     'on w.uuid=wt.wim_id')
-
-        wims = self.persist.query(
-            FROM=from_, SELECT=select_, WHERE=where_, LIMIT=limit_,
-            error_if_none=False)
-
-        utils.convert_float_timestamp2str(wims)
-        return format_out({'wims': wims})
-
-    @route('GET', '/<tenant_id>/wims/<wim_id>')
-    def http_get_wim(self, tenant_id, wim_id):
-        tenant_id = None if tenant_id == 'any' else tenant_id
-        wim = self.engine.get_wim(wim_id, tenant_id)
-        mappings = self.engine.get_wim_port_mappings(wim_id)
-        wim['config'] = utils.merge_dicts(wim.get('config', {}) or {},
-                                          wim_port_mapping=mappings)
-        return format_out({'wim': wim})
-
-    @route('POST', '/wims')
-    def http_create_wim(self):
-        http_content, _ = format_in(wim_schema, confidential_data=True)
-        r = utils.remove_extra_items(http_content, wim_schema)
-        if r:
-            self.logger.debug("Remove extra items received %r", r)
-        data = self.engine.create_wim(http_content['wim'])
-        return self.http_get_wim('any', data)
-
-    @route('PUT', '/wims/<wim_id>')
-    def http_update_wim(self, wim_id):
-        '''edit wim details, can use both uuid or name'''
-        # parse input data
-        http_content, _ = format_in(wim_edit_schema)
-        r = utils.remove_extra_items(http_content, wim_edit_schema)
-        if r:
-            self.logger.debug("Remove received extra items %s", r)
-
-        wim_id = self.engine.update_wim(wim_id, http_content['wim'])
-        return self.http_get_wim('any', wim_id)
-
-    @route('DELETE', '/wims/<wim_id>')
-    def http_delete_wim(self, wim_id):
-        """Delete a wim from a database, can use both uuid or name"""
-        data = self.engine.delete_wim(wim_id)
-        # TODO Remove WIM in orchestrator
-        return format_out({"result": "wim '" + data + "' deleted"})
-
-    @route('POST', '/<tenant_id>/wims/<wim_id>')
-    def http_create_wim_account(self, tenant_id, wim_id):
-        """Associate an existing wim to this tenant"""
-        # parse input data
-        http_content, _ = format_in(
-            wim_account_schema, confidential_data=True)
-        removed = utils.remove_extra_items(http_content, wim_account_schema)
-        removed and self.logger.debug("Remove extra items %r", removed)
-        account = self.engine.create_wim_account(
-            wim_id, tenant_id, http_content['wim_account'])
-        # check update succeeded
-        return format_out({"wim_account": account})
-
-    @route('PUT', '/<tenant_id>/wims/<wim_id>')
-    def http_update_wim_accounts(self, tenant_id, wim_id):
-        """Edit the association of an existing wim to this tenant"""
-        tenant_id = None if tenant_id == 'any' else tenant_id
-        # parse input data
-        http_content, _ = format_in(
-            wim_account_schema, confidential_data=True)
-        removed = utils.remove_extra_items(http_content, wim_account_schema)
-        removed and self.logger.debug("Remove extra items %r", removed)
-        accounts = self.engine.update_wim_accounts(
-            wim_id, tenant_id, http_content['wim_account'])
-
-        if tenant_id:
-            return format_out({'wim_account': accounts[0]})
-
-        return format_out({'wim_accounts': accounts})
-
-    @route('DELETE', '/<tenant_id>/wims/<wim_id>')
-    def http_delete_wim_accounts(self, tenant_id, wim_id):
-        """Deassociate an existing wim to this tenant"""
-        tenant_id = None if tenant_id == 'any' else tenant_id
-        accounts = self.engine.delete_wim_accounts(wim_id, tenant_id,
-                                                   error_if_none=True)
-
-        properties = (
-            (account['name'], wim_id,
-             utils.safe_get(account, 'association.nfvo_tenant_id', tenant_id))
-            for account in accounts)
-
-        return format_out({
-            'result': '\n'.join('WIM account `{}` deleted. '
-                                'Tenant `{}` detached from WIM `{}`'
-                                .format(*p) for p in properties)
-        })
-
-    @route('POST', '/<tenant_id>/wims/<wim_id>/port_mapping')
-    def http_create_wim_port_mappings(self, tenant_id, wim_id):
-        """Set the wim port mapping for a wim"""
-        # parse input data
-        http_content, _ = format_in(wim_port_mapping_schema)
-
-        data = self.engine.create_wim_port_mappings(
-            wim_id, http_content['wim_port_mapping'], tenant_id)
-        return format_out({"wim_port_mapping": data})
-
-    @route('GET', '/<tenant_id>/wims/<wim_id>/port_mapping')
-    def http_get_wim_port_mappings(self, tenant_id, wim_id):
-        """Get wim port mapping details"""
-        # TODO: tenant_id is never used, so it should be removed
-        data = self.engine.get_wim_port_mappings(wim_id)
-        return format_out({"wim_port_mapping": data})
-
-    @route('DELETE', '/<tenant_id>/wims/<wim_id>/port_mapping')
-    def http_delete_wim_port_mappings(self, tenant_id, wim_id):
-        """Clean wim port mapping"""
-        # TODO: tenant_id is never used, so it should be removed
-        data = self.engine.delete_wim_port_mappings(wim_id)
-        return format_out({"result": data})
diff --git a/osm_ro/wim/persistence.py b/osm_ro/wim/persistence.py
deleted file mode 100644 (file)
index 74f7dc6..0000000
+++ /dev/null
@@ -1,1010 +0,0 @@
-# -*- coding: utf-8 -*-
-##
-# Copyright 2018 University of Bristol - High Performance Networks Research
-# Group
-# All Rights Reserved.
-#
-# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
-# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: <highperformance-networks@bristol.ac.uk>
-#
-# Neither the name of the University of Bristol nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# This work has been performed in the context of DCMS UK 5G Testbeds
-# & Trials Programme and in the framework of the Metro-Haul project -
-# funded by the European Commission under Grant number 761727 through the
-# Horizon 2020 and 5G-PPP programmes.
-##
-
-"""This module contains only logic related to managing records in a database
-which includes data format normalization, data format validation and etc.
-(It works as an extension to `nfvo_db.py` for the WIM feature)
-
-No domain logic/architectural concern should be present in this file.
-"""
-import json
-import logging
-from contextlib import contextmanager
-from hashlib import sha1
-from itertools import groupby
-from operator import itemgetter
-from sys import exc_info
-# from time import time
-from uuid import uuid1 as generate_uuid
-
-from six import reraise
-
-import yaml
-
-from ..utils import (
-    check_valid_uuid,
-    convert_float_timestamp2str,
-    expand_joined_fields,
-    filter_dict_keys,
-    filter_out_dict_keys,
-    merge_dicts,
-    remove_none_items
-)
-from .errors import (
-    DbBaseException,
-    InvalidParameters,
-    MultipleRecordsFound,
-    NoRecordFound,
-    UndefinedUuidOrName,
-    UndefinedWanMappingType,
-    UnexpectedDatabaseError,
-    WimAccountOverwrite,
-    WimAndTenantAlreadyAttached
-)
-
-_WIM = 'wims AS wim '
-
-_WIM_JOIN = (
-    _WIM +
-    ' JOIN wim_nfvo_tenants AS association '
-    '   ON association.wim_id=wim.uuid '
-    ' JOIN nfvo_tenants AS nfvo_tenant '
-    '   ON association.nfvo_tenant_id=nfvo_tenant.uuid '
-    ' JOIN wim_accounts AS wim_account '
-    '   ON association.wim_account_id=wim_account.uuid '
-)
-
-_WIM_ACCOUNT_JOIN = (
-    'wim_accounts AS wim_account '
-    ' JOIN wim_nfvo_tenants AS association '
-    '   ON association.wim_account_id=wim_account.uuid '
-    ' JOIN wims AS wim '
-    '   ON association.wim_id=wim.uuid '
-    ' JOIN nfvo_tenants AS nfvo_tenant '
-    '   ON association.nfvo_tenant_id=nfvo_tenant.uuid '
-)
-
-_DATACENTER_JOIN = (
-    'datacenters AS datacenter '
-    ' JOIN tenants_datacenters AS association '
-    '   ON association.datacenter_id=datacenter.uuid '
-    ' JOIN datacenter_tenants as datacenter_account '
-    '   ON association.datacenter_tenant_id=datacenter_account.uuid '
-    ' JOIN nfvo_tenants AS nfvo_tenant '
-    '   ON association.nfvo_tenant_id=nfvo_tenant.uuid '
-)
-
-_PORT_MAPPING = 'wim_port_mappings as wim_port_mapping '
-
-_PORT_MAPPING_JOIN_WIM = (
-    ' JOIN wims as wim '
-    '   ON wim_port_mapping.wim_id=wim.uuid '
-)
-
-_PORT_MAPPING_JOIN_DATACENTER = (
-    ' JOIN datacenters as datacenter '
-    '   ON wim_port_mapping.datacenter_id=datacenter.uuid '
-)
-
-_WIM_SELECT = [
-    'wim.{0} as {0}'.format(_field)
-    for _field in 'uuid name description wim_url type config '
-                  'created_at modified_at'.split()
-]
-
-_WIM_ACCOUNT_SELECT = 'uuid name user password config'.split()
-
-_PORT_MAPPING_SELECT = ('wim_port_mapping.*', )
-
-_CONFIDENTIAL_FIELDS = ('password', 'passwd')
-
-_SERIALIZED_FIELDS = ('config', 'vim_info', 'wim_info', 'conn_info', 'extra',
-                      'wan_service_mapping_info')
-
-UNIQUE_PORT_MAPPING_INFO_FIELDS = {
-    'dpid-port': ('wan_switch_dpid', 'wan_switch_port')
-}
-"""Fields that should be unique for each port mapping that relies on
-wan_service_mapping_info.
-
-For example, for port mappings of type 'dpid-port', each combination of
-wan_switch_dpid and wan_switch_port should be unique (the same switch cannot
-be connected to two different places using the same port)
-"""
-
-
-class WimPersistence(object):
-    """High level interactions with the WIM tables in the database"""
-
-    def __init__(self, db, logger=None):
-        self.db = db
-        self.logger = logger or logging.getLogger('openmano.wim.persistence')
-
-    def query(self,
-              FROM=None,
-              SELECT=None,
-              WHERE=None,
-              ORDER_BY=None,
-              LIMIT=None,
-              OFFSET=None,
-              error_if_none=True,
-              error_if_multiple=False,
-              postprocess=None,
-              hide=_CONFIDENTIAL_FIELDS,
-              **kwargs):
-        """Retrieve records from the database.
-
-        Keyword Arguments:
-            SELECT, FROM, WHERE, LIMIT, ORDER_BY: used to compose the SQL
-                query. See ``nfvo_db.get_rows``.
-            OFFSET: only valid when used togheter with LIMIT.
-                    Ignore the OFFSET first results of the query.
-            error_if_none: by default an error is raised if no record is
-                found. With this option it is possible to disable this error.
-            error_if_multiple: by default no error is raised if more then one
-                record is found.
-                With this option it is possible to enable this error.
-            postprocess: function applied to every retrieved record.
-                This function receives a dict as input and must return it
-                after modifications. Moreover this function should accept a
-                second optional parameter ``hide`` indicating
-                the confidential fiels to be obfuscated.
-                By default a minimal postprocessing function is applied,
-                obfuscating confidential fields and converting timestamps.
-            hide: option proxied to postprocess
-
-        All the remaining keyword arguments will be assumed to be ``name``s or
-        ``uuid``s to compose the WHERE statement, according to their format.
-        If the value corresponds to an array, the first element will determine
-        if it is an name or UUID.
-
-        For example:
-            - ``wim="abcdef"``` will be turned into ``wim.name="abcdef"``,
-            - ``datacenter="5286a274-8a1b-4b8d-a667-9c94261ad855"``
-               will be turned into
-               ``datacenter.uuid="5286a274-8a1b-4b8d-a667-9c94261ad855"``.
-            - ``wim=["5286a274-8a1b-4b8d-a667-9c94261ad855", ...]``
-               will be turned into
-               ``wim.uuid=["5286a274-8a1b-4b8d-a667-9c94261ad855", ...]``
-
-        Raises:
-            NoRecordFound: if the query result set is empty
-            DbBaseException: errors occuring during the execution of the query.
-        """
-        # Defaults:
-        postprocess = postprocess or _postprocess_record
-        WHERE = WHERE or {}
-
-        # Find remaining keywords by name or uuid
-        WHERE.update(_compose_where_from_uuids_or_names(**kwargs))
-        WHERE = WHERE or None
-        # ^ If the where statement is empty, it is better to leave it as None,
-        #   so it can be filtered out at a later stage
-        LIMIT = ('{:d},{:d}'.format(OFFSET, LIMIT)
-                 if LIMIT and OFFSET else LIMIT)
-
-        query = remove_none_items({
-            'SELECT': SELECT, 'FROM': FROM, 'WHERE': WHERE,
-            'LIMIT': LIMIT, 'ORDER_BY': ORDER_BY})
-
-        records = self.db.get_rows(**query)
-
-        table = FROM.split()[0]
-        if error_if_none and not records:
-            raise NoRecordFound(WHERE, table)
-
-        if error_if_multiple and len(records) > 1:
-            self.logger.error('Multiple records '
-                              'FROM %s WHERE %s:\n\n%s\n\n',
-                              FROM, WHERE, json.dumps(records, indent=4))
-            raise MultipleRecordsFound(WHERE, table)
-
-        return [
-            expand_joined_fields(postprocess(record, hide))
-            for record in records
-        ]
-
-    def query_one(self, *args, **kwargs):
-        """Similar to ``query``, but ensuring just one result.
-        ``error_if_multiple`` is enabled by default.
-        """
-        kwargs.setdefault('error_if_multiple', True)
-        records = self.query(*args, **kwargs)
-        return records[0] if records else None
-
-    def get_by_uuid(self, table, uuid, **kwargs):
-        """Retrieve one record from the database based on its uuid
-
-        Arguments:
-            table (str): table name (to be used in SQL's FROM statement).
-            uuid (str): unique identifier for record.
-
-        For additional keyword arguments and exceptions see :obj:`~.query`
-        (``error_if_multiple`` is enabled by default).
-        """
-        if uuid is None:
-            raise UndefinedUuidOrName(table)
-        return self.query_one(table, WHERE={'uuid': uuid}, **kwargs)
-
-    def get_by_name_or_uuid(self, table, uuid_or_name, **kwargs):
-        """Retrieve a record from the database based on a value that can be its
-        uuid or name.
-
-        Arguments:
-            table (str): table name (to be used in SQL's FROM statement).
-            uuid_or_name (str): this value can correspond to either uuid or
-                name
-        For additional keyword arguments and exceptions see :obj:`~.query`
-        (``error_if_multiple`` is enabled by default).
-        """
-        if uuid_or_name is None:
-            raise UndefinedUuidOrName(table)
-
-        key = 'uuid' if check_valid_uuid(uuid_or_name) else 'name'
-        return self.query_one(table, WHERE={key: uuid_or_name}, **kwargs)
-
-    def get_wims(self, uuid_or_name=None, tenant=None, **kwargs):
-        """Retrieve information about one or more WIMs stored in the database
-
-        Arguments:
-            uuid_or_name (str): uuid or name for WIM
-            tenant (str): [optional] uuid or name for NFVO tenant
-
-        See :obj:`~.query` for additional keyword arguments.
-        """
-        kwargs.update(wim=uuid_or_name, tenant=tenant)
-        from_ = _WIM_JOIN if tenant else _WIM
-        select_ = _WIM_SELECT[:] + (['wim_account.*'] if tenant else [])
-
-        kwargs.setdefault('SELECT', select_)
-        return self.query(from_, **kwargs)
-
-    def get_wim(self, wim, tenant=None, **kwargs):
-        """Similar to ``get_wims`` but ensure only one result is returned"""
-        kwargs.setdefault('error_if_multiple', True)
-        return self.get_wims(wim, tenant)[0]
-
-    def create_wim(self, wim_descriptor):
-        """Create a new wim record inside the database and returns its uuid
-
-        Arguments:
-            wim_descriptor (dict): properties of the record
-                (usually each field corresponds to a database column, but extra
-                information can be offloaded to another table or serialized as
-                JSON/YAML)
-        Returns:
-            str: UUID of the created WIM
-        """
-        if "config" in wim_descriptor:
-            wim_descriptor["config"] = _serialize(wim_descriptor["config"])
-
-        return self.db.new_row(
-            "wims", wim_descriptor, add_uuid=True, confidential_data=True)
-
-    def update_wim(self, uuid_or_name, wim_descriptor):
-        """Change an existing WIM record on the database"""
-        # obtain data, check that only one exist
-        wim = self.get_by_name_or_uuid('wims', uuid_or_name)
-
-        # edit data
-        wim_id = wim['uuid']
-        where = {'uuid': wim['uuid']}
-
-        # unserialize config, edit and serialize it again
-        new_config_dict = wim_descriptor.get('config', {}) or {}
-        config_dict = remove_none_items(merge_dicts(
-            wim.get('config', {}) or {}, new_config_dict))
-        wim_descriptor['config'] = (
-            _serialize(config_dict) if config_dict else None)
-
-        self.db.update_rows('wims', wim_descriptor, where)
-
-        return wim_id
-
-    def delete_wim(self, wim):
-        # get nfvo_tenant info
-        wim = self.get_by_name_or_uuid('wims', wim)
-
-        self.db.delete_row_by_id('wims', wim['uuid'])
-
-        return wim['uuid'] + ' ' + wim['name']
-
-    def get_wim_accounts_by(self, wim=None, tenant=None, uuid=None, **kwargs):
-        """Retrieve WIM account information from the database together
-        with the related records (wim, nfvo_tenant and wim_nfvo_tenant)
-
-        Arguments:
-            wim (str): uuid or name for WIM
-            tenant (str): [optional] uuid or name for NFVO tenant
-
-        See :obj:`~.query` for additional keyword arguments.
-        """
-        kwargs.update(wim=wim, tenant=tenant)
-        kwargs.setdefault('postprocess', _postprocess_wim_account)
-        if uuid:
-            kwargs.setdefault('WHERE', {'wim_account.uuid': uuid})
-        return self.query(FROM=_WIM_ACCOUNT_JOIN, **kwargs)
-
-    def get_wim_account_by(self, wim=None, tenant=None, uuid=None, **kwargs):
-        """Similar to ``get_wim_accounts_by``, but ensuring just one result"""
-        kwargs.setdefault('error_if_multiple', True)
-        return self.get_wim_accounts_by(wim, tenant, uuid, **kwargs)[0]
-
-    def get_wim_accounts(self, **kwargs):
-        """Retrieve all the accounts from the database"""
-        kwargs.setdefault('postprocess', _postprocess_wim_account)
-        return self.query(FROM=_WIM_ACCOUNT_JOIN, **kwargs)
-
-    def get_wim_account(self, uuid_or_name, **kwargs):
-        """Retrieve WIM Account record by UUID or name,
-        See :obj:`get_by_name_or_uuid` for keyword arguments.
-        """
-        kwargs.setdefault('postprocess', _postprocess_wim_account)
-        kwargs.setdefault('SELECT', _WIM_ACCOUNT_SELECT)
-        return self.get_by_name_or_uuid('wim_accounts', uuid_or_name, **kwargs)
-
-    @contextmanager
-    def _associate(self, wim_id, nfvo_tenant_id):
-        """Auxiliary method for ``create_wim_account``
-
-        This method just create a row in the association table
-        ``wim_nfvo_tenants``
-        """
-        try:
-            yield
-        except DbBaseException as db_exception:
-            error_msg = str(db_exception)
-            if all([msg in error_msg
-                    for msg in ("already in use", "'wim_nfvo_tenant'")]):
-                ex = WimAndTenantAlreadyAttached(wim_id, nfvo_tenant_id)
-                reraise(ex.__class__, ex, exc_info()[2])
-
-            raise
-
-    def create_wim_account(self, wim, tenant, properties):
-        """Associate a wim to a tenant using the ``wim_nfvo_tenants`` table
-        and create a ``wim_account`` to store credentials and configurations.
-
-        For the sake of simplification, we assume that each NFVO tenant can be
-        attached to a WIM using only one WIM account. This is automatically
-        guaranteed via database constraints.
-        For corner cases, the same WIM can be registered twice using another
-        name.
-
-        Arguments:
-            wim (str): name or uuid of the WIM related to the account being
-                created
-            tenant (str): name or uuid of the nfvo tenant to which the account
-                will be created
-            properties (dict): properties of the account
-                (eg. user, password, ...)
-        """
-        wim_id = self.get_by_name_or_uuid('wims', wim, SELECT=['uuid'])['uuid']
-        tenant = self.get_by_name_or_uuid('nfvo_tenants', tenant,
-                                          SELECT=['uuid', 'name'])
-        account = properties.setdefault('name', tenant['name'])
-
-        wim_account = self.query_one('wim_accounts',
-                                     WHERE={'wim_id': wim_id, 'name': account},
-                                     error_if_none=False)
-
-        transaction = []
-        used_uuids = []
-
-        if wim_account is None:
-            # If a row for the wim account doesn't exist yet, we need to
-            # create one, otherwise we can just re-use it.
-            account_id = str(generate_uuid())
-            used_uuids.append(account_id)
-            row = merge_dicts(properties, wim_id=wim_id, uuid=account_id)
-            transaction.append({'wim_accounts': _preprocess_wim_account(row)})
-        else:
-            account_id = wim_account['uuid']
-            properties.pop('config', None)  # Config is too complex to compare
-            diff = {k: v for k, v in properties.items() if v != wim_account[k]}
-            if diff:
-                tip = 'Edit the account first, and then attach it to a tenant'
-                raise WimAccountOverwrite(wim_account, diff, tip)
-
-        transaction.append({
-            'wim_nfvo_tenants': {'nfvo_tenant_id': tenant['uuid'],
-                                 'wim_id': wim_id,
-                                 'wim_account_id': account_id}})
-
-        with self._associate(wim_id, tenant['uuid']):
-            self.db.new_rows(transaction, used_uuids, confidential_data=True)
-
-        return account_id
-
-    def update_wim_account(self, uuid, properties, hide=_CONFIDENTIAL_FIELDS):
-        """Update WIM account record by overwriting fields with new values
-
-        Specially for the field ``config`` this means that a new dict will be
-        merged to the existing one.
-
-        Attributes:
-            uuid (str): UUID for the WIM account
-            properties (dict): fields that should be overwritten
-
-        Returns:
-            Updated wim_account
-        """
-        wim_account = self.get_by_uuid('wim_accounts', uuid)
-        safe_fields = 'user password name created'.split()
-        updates = _preprocess_wim_account(
-            merge_dicts(wim_account, filter_dict_keys(properties, safe_fields))
-        )
-
-        if properties.get('config'):
-            old_config = wim_account.get('config') or {}
-            new_config = merge_dicts(old_config, properties['config'])
-            updates['config'] = _serialize(new_config)
-
-        num_changes = self.db.update_rows('wim_accounts', UPDATE=updates,
-                                          WHERE={'uuid': wim_account['uuid']})
-
-        if num_changes is None:
-            raise UnexpectedDatabaseError('Impossible to update wim_account '
-                                          '{name}:{uuid}'.format(*wim_account))
-
-        return self.get_wim_account(wim_account['uuid'], hide=hide)
-
-    def delete_wim_account(self, uuid):
-        """Remove WIM account record from the database"""
-        # Since we have foreign keys configured with ON CASCADE, we can rely
-        # on the database engine to guarantee consistency, deleting the
-        # dependant records
-        return self.db.delete_row_by_id('wim_accounts', uuid)
-
-    def get_datacenters_by(self, datacenter=None, tenant=None, **kwargs):
-        """Retrieve datacenter information from the database together
-        with the related records (nfvo_tenant)
-
-        Arguments:
-            datacenter (str): uuid or name for datacenter
-            tenant (str): [optional] uuid or name for NFVO tenant
-
-        See :obj:`~.query` for additional keyword arguments.
-        """
-        if tenant:
-            kwargs.update(datacenter=datacenter, tenant=tenant)
-            return self.query(_DATACENTER_JOIN, **kwargs)
-        else:
-            return [self.get_by_name_or_uuid('datacenters',
-                                             datacenter, **kwargs)]
-
-    def get_datacenter_by(self, datacenter=None, tenant=None, **kwargs):
-        """Similar to ``get_datacenters_by``, but ensuring just one result"""
-        kwargs.setdefault('error_if_multiple', True)
-        return self.get_datacenters_by(datacenter, tenant, **kwargs)[0]
-
-    def _create_single_port_mapping(self, properties):
-        info = properties.setdefault('wan_service_mapping_info', {})
-        endpoint_id = properties.get('wan_service_endpoint_id')
-
-        if info.get('mapping_type') and not endpoint_id:
-            properties['wan_service_endpoint_id'] = (
-                self._generate_port_mapping_id(info))
-
-        properties['wan_service_mapping_info'] = _serialize(info)
-
-        try:
-            self.db.new_row('wim_port_mappings', properties,
-                            add_uuid=False, confidential_data=True)
-        except DbBaseException as old_exception:
-            self.logger.exception(old_exception)
-            ex = InvalidParameters(
-                "The mapping must contain the "
-                "'pop_switch_dpid', 'pop_switch_port',  and "
-                "wan_service_mapping_info: "
-                "('wan_switch_dpid' and 'wan_switch_port') or "
-                "'wan_service_endpoint_id}'")
-            reraise(ex.__class__, ex, exc_info()[2])
-
-        return properties
-
-    def create_wim_port_mappings(self, wim, port_mappings, tenant=None):
-        if not isinstance(wim, dict):
-            wim = self.get_by_name_or_uuid('wims', wim)
-
-        for port_mapping in port_mappings:
-            port_mapping['wim_name'] = wim['name']
-            datacenter = self.get_datacenter_by(
-                port_mapping['datacenter_name'], tenant)
-            for pop_wan_port_mapping in port_mapping['pop_wan_mappings']:
-                element = merge_dicts(pop_wan_port_mapping, {
-                    'wim_id': wim['uuid'],
-                    'datacenter_id': datacenter['uuid']})
-                self._create_single_port_mapping(element)
-
-        return port_mappings
-
-    def _filter_port_mappings_by_tenant(self, mappings, tenant):
-        """Make sure all the datacenters and wims listed in the port mapping
-        belong to an specific tenant
-        """
-
-        # NOTE: Theoretically this could be done at SQL level, but given the
-        #       number of tables involved (wim_port_mappings, wim_accounts,
-        #       wims, wim_nfvo_tenants, datacenters, datacenter_tenants,
-        #       tenants_datacents and nfvo_tenants), it would result in a
-        #       extremely complex query. Moreover, the predicate can vary:
-        #       for `get_wim_port_mappings` we can have any combination of
-        #       (wim, datacenter, tenant), not all of them having the 3 values
-        #       so we have combinatorial trouble to write the 'FROM' statement.
-
-        kwargs = {'tenant': tenant, 'error_if_none': False}
-        # Cache results to speedup things
-        datacenters = {}
-        wims = {}
-
-        def _get_datacenter(uuid):
-            return (
-                datacenters.get(uuid) or
-                datacenters.setdefault(
-                    uuid, self.get_datacenters_by(uuid, **kwargs)))
-
-        def _get_wims(uuid):
-            return (wims.get(uuid) or
-                    wims.setdefault(uuid, self.get_wims(uuid, **kwargs)))
-
-        return [
-            mapping
-            for mapping in mappings
-            if (_get_datacenter(mapping['datacenter_id']) and
-                _get_wims(mapping['wim_id']))
-        ]
-
-    def get_wim_port_mappings(self, wim=None, datacenter=None, tenant=None,
-                              **kwargs):
-        """List all the port mappings, optionally filtering by wim, datacenter
-        AND/OR tenant
-        """
-        from_ = [_PORT_MAPPING,
-                 _PORT_MAPPING_JOIN_WIM if wim else '',
-                 _PORT_MAPPING_JOIN_DATACENTER if datacenter else '']
-
-        criteria = ('wim_id', 'datacenter_id')
-        kwargs.setdefault('error_if_none', False)
-        mappings = self.query(
-            ' '.join(from_),
-            SELECT=_PORT_MAPPING_SELECT,
-            ORDER_BY=['wim_port_mapping.{}'.format(c) for c in criteria],
-            wim=wim, datacenter=datacenter,
-            postprocess=_postprocess_wim_port_mapping,
-            **kwargs)
-
-        if tenant:
-            mappings = self._filter_port_mappings_by_tenant(mappings, tenant)
-
-        # We don't have to sort, since we have used 'ORDER_BY'
-        grouped_mappings = groupby(mappings, key=itemgetter(*criteria))
-
-        return [
-            {'wim_id': key[0],
-             'datacenter_id': key[1],
-             'pop_wan_mappings': [
-                 filter_out_dict_keys(mapping, (
-                     'id', 'wim_id', 'datacenter_id',
-                     'created_at', 'modified_at'))
-                 for mapping in group]}
-            for key, group in grouped_mappings
-        ]
-
-    def delete_wim_port_mappings(self, wim_id):
-        self.db.delete_row(FROM='wim_port_mappings', WHERE={"wim_id": wim_id})
-        return "port mapping for wim {} deleted.".format(wim_id)
-
-    def update_wim_port_mapping(self, id, properties):
-        original = self.query_one('wim_port_mappings', WHERE={'id': id})
-
-        mapping_info = remove_none_items(merge_dicts(
-            original.get('wan_service_mapping_info') or {},
-            properties.get('wan_service_mapping_info') or {}))
-
-        updates = preprocess_record(
-            merge_dicts(original, remove_none_items(properties),
-                        wan_service_mapping_info=mapping_info))
-
-        num_changes = self.db.update_rows('wim_port_mappings',
-                                          UPDATE=updates, WHERE={'id': id})
-
-        if num_changes is None:
-            raise UnexpectedDatabaseError(
-                'Impossible to update wim_port_mappings {}:\n{}\n'.format(
-                    id, _serialize(properties))
-            )
-
-        return num_changes
-
-    def get_actions_in_groups(self, wim_account_id,
-                              item_types=('instance_wim_nets',),
-                              group_offset=0, group_limit=150):
-        """Retrieve actions from the database in groups.
-        Each group contains all the actions that have the same ``item`` type
-        and ``item_id``.
-
-        Arguments:
-            wim_account_id: restrict the search to actions to be performed
-                using the same account
-            item_types (list): [optional] filter the actions to the given
-                item types
-            group_limit (int): maximum number of groups returned by the
-                function
-            group_offset (int): skip the N first groups. Used together with
-                group_limit for pagination purposes.
-
-        Returns:
-            List of groups, where each group is a tuple ``(key, actions)``.
-            In turn, ``key`` is a tuple containing the values of
-            ``(item, item_id)`` used to create the group and ``actions`` is a
-            list of ``vim_wim_actions`` records (dicts).
-        """
-
-        type_options = set(
-            '"{}"'.format(self.db.escape_string(t)) for t in item_types)
-
-        items = ('SELECT DISTINCT a.item, a.item_id, a.wim_account_id '
-                 'FROM vim_wim_actions AS a '
-                 'WHERE a.wim_account_id="{}" AND a.item IN ({}) '
-                 'ORDER BY a.item, a.item_id '
-                 'LIMIT {:d},{:d}').format(
-                     self.safe_str(wim_account_id),
-                     ','.join(type_options),
-                     group_offset, group_limit)
-
-        join = 'vim_wim_actions NATURAL JOIN ({}) AS items'.format(items)
-        db_results = self.db.get_rows(
-            FROM=join, ORDER_BY=('item', 'item_id', 'created_at'))
-
-        results = (_postprocess_action(r) for r in db_results)
-        criteria = itemgetter('item', 'item_id')
-        return [(k, list(g)) for k, g in groupby(results, key=criteria)]
-
-    def update_action(self, instance_action_id, task_index, properties):
-        condition = {'instance_action_id': instance_action_id,
-                     'task_index': task_index}
-        try:
-            action = self.query_one('vim_wim_actions', WHERE=condition)
-        except Exception:
-            actions = self.query('vim_wim_actions', WHERE=condition)
-            self.logger.error('More then one action found:\n%s',
-                              json.dumps(actions, indent=4))
-            action = actions[0]
-
-        extra = remove_none_items(merge_dicts(
-            action.get('extra') or {},
-            properties.get('extra') or {}))
-
-        updates = preprocess_record(
-            merge_dicts(action, properties, extra=extra))
-
-        num_changes = self.db.update_rows('vim_wim_actions', UPDATE=updates, WHERE=condition)
-
-        if num_changes is None:
-            raise UnexpectedDatabaseError(
-                'Impossible to update vim_wim_actions '
-                '{instance_action_id}[{task_index}]'.format(*action))
-
-        return num_changes
-
-    def get_wan_links(self, uuid=None, **kwargs):
-        """Retrieve WAN link records from the database
-
-        Keyword Arguments:
-            uuid, instance_scenario_id, sce_net_id, wim_id, wim_account_id:
-                attributes that can be used at the WHERE clause
-        """
-        kwargs.setdefault('uuid', uuid)
-        kwargs.setdefault('error_if_none', False)
-
-        criteria_fields = ('uuid', 'instance_scenario_id', 'sce_net_id',
-                           'wim_id', 'wim_account_id')
-        criteria = remove_none_items(filter_dict_keys(kwargs, criteria_fields))
-        kwargs = filter_out_dict_keys(kwargs, criteria_fields)
-
-        return self.query('instance_wim_nets', WHERE=criteria, **kwargs)
-
-    def update_wan_link(self, uuid, properties):
-        wan_link = self.get_by_uuid('instance_wim_nets', uuid)
-
-        wim_info = remove_none_items(merge_dicts(
-            wan_link.get('wim_info') or {},
-            properties.get('wim_info') or {}))
-
-        updates = preprocess_record(
-            merge_dicts(wan_link, properties, wim_info=wim_info))
-
-        self.logger.debug({'UPDATE': updates})
-        num_changes = self.db.update_rows(
-            'instance_wim_nets', UPDATE=updates,
-            WHERE={'uuid': wan_link['uuid']})
-
-        if num_changes is None:
-            raise UnexpectedDatabaseError(
-                'Impossible to update instance_wim_nets ' + wan_link['uuid'])
-
-        return num_changes
-
-    def get_instance_nets(self, instance_scenario_id, sce_net_id, **kwargs):
-        """Retrieve all the instance nets related to the same instance_scenario
-        and scenario network
-        """
-        return self.query(
-            'instance_nets',
-            WHERE={'instance_scenario_id': instance_scenario_id,
-                   'sce_net_id': sce_net_id},
-            ORDER_BY=kwargs.pop(
-                'ORDER_BY', ('instance_scenario_id', 'sce_net_id')),
-            **kwargs)
-
-    def update_instance_action_counters(self, uuid, failed=None, done=None):
-        """Atomically increment/decrement number_done and number_failed fields
-        in the instance action table
-        """
-        changes = remove_none_items({
-            'number_failed': failed and {'INCREMENT': failed},
-            'number_done': done and {'INCREMENT': done}
-        })
-
-        if not changes:
-            return 0
-
-        return self.db.update_rows('instance_actions', WHERE={'uuid': uuid}, UPDATE=changes)
-
-    def get_only_vm_with_external_net(self, instance_net_id, **kwargs):
-        """Return an instance VM if that is the only VM connected to an
-        external network identified by instance_net_id
-        """
-        counting = ('SELECT DISTINCT instance_net_id '
-                    'FROM instance_interfaces '
-                    'WHERE instance_net_id="{}" AND type="external" '
-                    'GROUP BY instance_net_id '
-                    'HAVING COUNT(*)=1').format(self.safe_str(instance_net_id))
-
-        vm_item = ('SELECT DISTINCT instance_vm_id '
-                   'FROM instance_interfaces NATURAL JOIN ({}) AS a'
-                   .format(counting))
-
-        return self.query_one(
-            'instance_vms JOIN ({}) as instance_interface '
-            'ON instance_vms.uuid=instance_interface.instance_vm_id'
-            .format(vm_item), **kwargs)
-
-    def safe_str(self, string):
-        """Return a SQL safe string"""
-        return self.db.escape_string(string)
-
-    def reconnect(self):
-        self.db.reconnect()
-
-    def _generate_port_mapping_id(self, mapping_info):
-        """Given a port mapping represented by a dict with a 'type' field,
-        generate a unique string, in a injective way.
-        """
-        mapping_info = mapping_info.copy()  # Avoid mutating original object
-        mapping_type = mapping_info.pop('mapping_type', None)
-        if not mapping_type:
-            raise UndefinedWanMappingType(mapping_info)
-
-        unique_fields = UNIQUE_PORT_MAPPING_INFO_FIELDS.get(mapping_type)
-
-        if unique_fields:
-            mapping_info = filter_dict_keys(mapping_info, unique_fields)
-        else:
-            self.logger.warning('Unique fields for WIM port mapping of type '
-                                '%s not defined. Please add a list of fields '
-                                'which combination should be unique in '
-                                'UNIQUE_PORT_MAPPING_INFO_FIELDS '
-                                '(`wim/persistency.py) ', mapping_type)
-
-        repeatable_repr = json.dumps(mapping_info, encoding='utf-8',
-                                     sort_keys=True, indent=False)
-
-        return ':'.join([mapping_type, _str2id(repeatable_repr)])
-
-
-def _serialize(value):
-    """Serialize an arbitrary value in a consistent way,
-    so it can be stored in a database inside a text field
-    """
-    return yaml.safe_dump(value, default_flow_style=True, width=256)
-
-
-def _unserialize(text):
-    """Unserialize text representation into an arbitrary value,
-    so it can be loaded from the database
-    """
-    return yaml.safe_load(text)
-
-
-def preprocess_record(record):
-    """Small transformations to be applied to the data that cames from the
-    user before writing it to the database. By default, filter out timestamps,
-    and serialize the ``config`` field.
-    """
-    automatic_fields = ['created_at', 'modified_at']
-    record = serialize_fields(filter_out_dict_keys(record, automatic_fields))
-
-    return record
-
-
-def _preprocess_wim_account(wim_account):
-    """Do the default preprocessing and convert the 'created' field from
-    boolean to string
-    """
-    wim_account = preprocess_record(wim_account)
-
-    created = wim_account.get('created')
-    wim_account['created'] = (
-        'true' if created is True or created == 'true' else 'false')
-
-    return wim_account
-
-
-def _postprocess_record(record, hide=_CONFIDENTIAL_FIELDS):
-    """By default, hide passwords fields, unserialize ``config`` fields, and
-    convert float timestamps to strings
-    """
-    record = hide_confidential_fields(record, hide)
-    record = unserialize_fields(record, hide)
-
-    convert_float_timestamp2str(record)
-
-    return record
-
-
-def _postprocess_action(action):
-    if action.get('extra'):
-        action['extra'] = _unserialize(action['extra'])
-
-    return action
-
-
-def _postprocess_wim_account(wim_account, hide=_CONFIDENTIAL_FIELDS):
-    """Do the default postprocessing and convert the 'created' field from
-    string to boolean
-    """
-    # Fix fields from join
-    for field in ('type', 'description', 'wim_url'):
-        if field in wim_account:
-            wim_account['wim.'+field] = wim_account.pop(field)
-
-    for field in ('id', 'nfvo_tenant_id', 'wim_account_id'):
-        if field in wim_account:
-            wim_account['association.'+field] = wim_account.pop(field)
-
-    wim_account = _postprocess_record(wim_account, hide)
-
-    created = wim_account.get('created')
-    wim_account['created'] = (created is True or created == 'true')
-
-    return wim_account
-
-
-def _postprocess_wim_port_mapping(mapping, hide=_CONFIDENTIAL_FIELDS):
-    mapping = _postprocess_record(mapping, hide=hide)
-    mapping_info = mapping.get('wan_service_mapping_info', None) or {}
-    mapping['wan_service_mapping_info'] = mapping_info
-    return mapping
-
-
-def hide_confidential_fields(record, fields=_CONFIDENTIAL_FIELDS):
-    """Obfuscate confidential fields from the input dict.
-
-    Note:
-        This function performs a SHALLOW operation.
-    """
-    if not(isinstance(record, dict) and fields):
-        return record
-
-    keys = record.iterkeys()
-    keys = (k for k in keys for f in fields if k == f or k.endswith('.'+f))
-
-    return merge_dicts(record, {k: '********' for k in keys if record[k]})
-
-
-def unserialize_fields(record, hide=_CONFIDENTIAL_FIELDS,
-                       fields=_SERIALIZED_FIELDS):
-    """Unserialize fields that where stored in the database as a serialized
-    YAML (or JSON)
-    """
-    keys = record.iterkeys()
-    keys = (k for k in keys for f in fields if k == f or k.endswith('.'+f))
-
-    return merge_dicts(record, {
-        key: hide_confidential_fields(_unserialize(record[key]), hide)
-        for key in keys if record[key]
-    })
-
-
-def serialize_fields(record, fields=_SERIALIZED_FIELDS):
-    """Serialize fields to be stored in the database as YAML"""
-    keys = record.iterkeys()
-    keys = (k for k in keys for f in fields if k == f or k.endswith('.'+f))
-
-    return merge_dicts(record, {
-        key: _serialize(record[key])
-        for key in keys if record[key] is not None
-    })
-
-
-def _decide_name_or_uuid(value):
-    reference = value
-
-    if isinstance(value, (list, tuple)):
-        reference = value[0] if value else ''
-
-    return 'uuid' if check_valid_uuid(reference) else 'name'
-
-
-def _compose_where_from_uuids_or_names(**conditions):
-    """Create a dict containing the right conditions to be used in a database
-    query.
-
-    This function chooses between ``names`` and ``uuid`` fields based on the
-    format of the passed string.
-    If a list is passed, the first element of the list will be used to choose
-    the name of the field.
-    If a ``None`` value is passed, ``uuid`` is used.
-
-    Note that this function automatically translates ``tenant`` to
-    ``nfvo_tenant`` for the sake of brevity.
-
-    Example:
-        >>> _compose_where_from_uuids_or_names(
-                wim='abcdef',
-                tenant=['xyz123', 'def456']
-                datacenter='5286a274-8a1b-4b8d-a667-9c94261ad855')
-        {'wim.name': 'abcdef',
-         'nfvo_tenant.name': ['xyz123', 'def456']
-         'datacenter.uuid': '5286a274-8a1b-4b8d-a667-9c94261ad855'}
-    """
-    if 'tenant' in conditions:
-        conditions['nfvo_tenant'] = conditions.pop('tenant')
-
-    return {
-        '{}.{}'.format(kind, _decide_name_or_uuid(value)): value
-        for kind, value in conditions.items() if value
-    }
-
-
-def _str2id(text):
-    """Create an ID (following the UUID format) from a piece of arbitrary
-    text.
-
-    Different texts should generate different IDs, and the same text should
-    generate the same ID in a repeatable way.
-    """
-    return sha1(text).hexdigest()
diff --git a/osm_ro/wim/schemas.py b/osm_ro/wim/schemas.py
deleted file mode 100644 (file)
index 101bcb1..0000000
+++ /dev/null
@@ -1,178 +0,0 @@
-# -*- coding: utf-8 -*-
-##
-# Copyright 2018 University of Bristol - High Performance Networks Research
-# Group
-# All Rights Reserved.
-#
-# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
-# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: <highperformance-networks@bristol.ac.uk>
-#
-# Neither the name of the University of Bristol nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# This work has been performed in the context of DCMS UK 5G Testbeds
-# & Trials Programme and in the framework of the Metro-Haul project -
-# funded by the European Commission under Grant number 761727 through the
-# Horizon 2020 and 5G-PPP programmes.
-##
-
-from ..openmano_schemas import (
-    description_schema,
-    name_schema,
-    nameshort_schema
-)
-
-# WIM -------------------------------------------------------------------------
-wim_types = ["tapi", "onos", "odl", "dynpac", "fake"]
-
-dpid_type = {
-    "type": "string",
-    "pattern":
-        "^[0-9a-zA-Z]+(:[0-9a-zA-Z]+)*$"
-}
-
-port_type = {
-    "oneOf": [
-        {"type": "string",
-         "minLength": 1,
-         "maxLength": 5},
-        {"type": "integer",
-         "minimum": 1,
-         "maximum": 65534}
-    ]
-}
-
-wim_port_mapping_desc = {
-    "type": "array",
-    "items": {
-        "type": "object",
-        "properties": {
-            "datacenter_name": nameshort_schema,
-            "pop_wan_mappings": {
-                "type": "array",
-                "items": {
-                    "type": "object",
-                    "properties": {
-                        "pop_switch_dpid": dpid_type,
-                        "pop_switch_port": port_type,
-                        "wan_service_endpoint_id": name_schema,
-                        "wan_service_mapping_info": {
-                            "type": "object",
-                            "properties": {
-                                "mapping_type": name_schema,
-                                "wan_switch_dpid": dpid_type,
-                                "wan_switch_port": port_type
-                            },
-                            "additionalProperties": True,
-                            "required": ["mapping_type"]
-                        }
-                    },
-                    "anyOf": [
-                        {
-                            "required": [
-                                "pop_switch_dpid",
-                                "pop_switch_port",
-                                "wan_service_endpoint_id"
-                            ]
-                        },
-                        {
-                            "required": [
-                                "pop_switch_dpid",
-                                "pop_switch_port",
-                                "wan_service_mapping_info"
-                            ]
-                        }
-                    ]
-                }
-            }
-        },
-        "required": ["datacenter_name", "pop_wan_mappings"]
-    }
-}
-
-wim_schema_properties = {
-    "name": name_schema,
-    "description": description_schema,
-    "type": {
-        "type": "string",
-        "enum": ["tapi", "onos", "odl", "dynpac", "fake"]
-    },
-    "wim_url": description_schema,
-    "config": {
-        "type": "object",
-        "properties": {
-            "wim_port_mapping": wim_port_mapping_desc
-        }
-    }
-}
-
-wim_schema = {
-    "title": "wim information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type": "object",
-    "properties": {
-        "wim": {
-            "type": "object",
-            "properties": wim_schema_properties,
-            "required": ["name", "type", "wim_url"],
-        }
-    },
-    "required": ["wim"],
-}
-
-wim_edit_schema = {
-    "title": "wim edit information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type": "object",
-    "properties": {
-        "wim": {
-            "type": "object",
-            "properties": wim_schema_properties,
-        }
-    },
-    "required": ["wim"],
-}
-
-wim_account_schema = {
-    "title": "wim account information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type": "object",
-    "properties": {
-        "wim_account": {
-            "type": "object",
-            "properties": {
-                "name": name_schema,
-                "user": nameshort_schema,
-                "password": nameshort_schema,
-                "config": {"type": "object"}
-            },
-        }
-    },
-    "required": ["wim_account"],
-}
-
-wim_port_mapping_schema = {
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "title": "wim mapping information schema",
-    "type": "object",
-    "properties": {
-        "wim_port_mapping": wim_port_mapping_desc
-    },
-    "required": ["wim_port_mapping"]
-}
diff --git a/osm_ro/wim/tests/__init__.py b/osm_ro/wim/tests/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/osm_ro/wim/tests/fixtures.py b/osm_ro/wim/tests/fixtures.py
deleted file mode 100644 (file)
index c39e9d7..0000000
+++ /dev/null
@@ -1,329 +0,0 @@
-# -*- coding: utf-8 -*-
-##
-# Copyright 2018 University of Bristol - High Performance Networks Research
-# Group
-# All Rights Reserved.
-#
-# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
-# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: <highperformance-networks@bristol.ac.uk>
-#
-# Neither the name of the University of Bristol nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# This work has been performed in the context of DCMS UK 5G Testbeds
-# & Trials Programme and in the framework of the Metro-Haul project -
-# funded by the European Commission under Grant number 761727 through the
-# Horizon 2020 and 5G-PPP programmes.
-##
-# pylint: disable=W0621
-
-from __future__ import unicode_literals
-
-import json
-from itertools import izip
-from time import time
-from textwrap import wrap
-
-from six.moves import range
-
-from ...tests.db_helpers import uuid, sha1
-
-NUM_WIMS = 3
-NUM_TENANTS = 2
-NUM_DATACENTERS = 2
-
-
-# In the following functions, the identifiers should be simple integers
-
-
-def wim(identifier=0):
-    return {'name': 'wim%d' % identifier,
-            'uuid': uuid('wim%d' % identifier),
-            'wim_url': 'localhost',
-            'type': 'tapi'}
-
-
-def tenant(identifier=0):
-    return {'name': 'tenant%d' % identifier,
-            'uuid': uuid('tenant%d' % identifier)}
-
-
-def wim_account(wim, tenant):
-    return {'name': 'wim-account%d%d' % (tenant, wim),
-            'uuid': uuid('wim-account%d%d' % (tenant, wim)),
-            'user': 'user%d%d' % (tenant, wim),
-            'password': 'password%d%d' % (tenant, wim),
-            'wim_id': uuid('wim%d' % wim),
-            'created': 'true'}
-
-
-def wim_tenant_association(wim, tenant):
-    return {'nfvo_tenant_id': uuid('tenant%d' % tenant),
-            'wim_id': uuid('wim%d' % wim),
-            'wim_account_id': uuid('wim-account%d%d' % (tenant, wim))}
-
-
-def wim_set(identifier=0, tenant=0):
-    """Records necessary to create a WIM and connect it to a tenant"""
-    return [
-        {'wims': [wim(identifier)]},
-        {'wim_accounts': [wim_account(identifier, tenant)]},
-        {'wim_nfvo_tenants': [wim_tenant_association(identifier, tenant)]}
-    ]
-
-
-def _datacenter_to_switch_port(dc_id, port=None):
-    digits = 16
-    switch = ':'.join(wrap(('%0' + str(digits) + 'x') % int(dc_id), 2))
-    return (switch, str((port or int(dc_id)) + 1))
-
-
-def datacenter(identifier, external_ports_config=False):
-    config = '' if not external_ports_config else json.dumps({
-        'external_connections': [
-            {'condition': {
-                'provider:physical_network': 'provider',
-                'encapsulation_type': 'vlan'},
-                'vim_external_port':
-                dict(izip(('switch', 'port'),
-                          _datacenter_to_switch_port(identifier)))}
-        ]})
-
-    return {'uuid': uuid('dc%d' % identifier),
-            'name': 'dc%d' % identifier,
-            'type': 'openvim',
-            'vim_url': 'localhost',
-            'config': config}
-
-
-def datacenter_account(datacenter, tenant):
-    return {'name': 'dc-account%d%d' % (tenant, datacenter),
-            'uuid': uuid('dc-account%d%d' % (tenant, datacenter)),
-            'datacenter_id': uuid('dc%d' % datacenter),
-            'created': 'true'}
-
-
-def datacenter_tenant_association(datacenter, tenant):
-    return {'nfvo_tenant_id': uuid('tenant%d' % tenant),
-            'datacenter_id': uuid('dc%d' % datacenter),
-            'datacenter_tenant_id': uuid('dc-account%d%d' % (tenant, datacenter))}
-
-
-def datacenter_set(identifier=0, tenant=0):
-    """Records necessary to create a datacenter and connect it to a tenant"""
-    return [
-        {'datacenters': [datacenter(identifier)]},
-        {'datacenter_tenants': [datacenter_account(identifier, tenant)]},
-        {'tenants_datacenters': [
-            datacenter_tenant_association(identifier, tenant)
-        ]}
-    ]
-
-
-def wim_port_mapping(wim, datacenter,
-                     pop_dpid='AA:AA:AA:AA:AA:AA:AA:AA', pop_port=None,
-                     wan_dpid='BB:BB:BB:BB:BB:BB:BB:BB', wan_port=None):
-    mapping_info = {'mapping_type': 'dpid-port',
-                    'wan_switch_dpid': wan_dpid,
-                    'wan_switch_port': (str(wan_port) if wan_port else
-                                        str(int(datacenter) + int(wim) + 1))}
-    id_ = 'dpid-port|' + sha1(json.dumps(mapping_info, sort_keys=True))
-
-    return {'wim_id': uuid('wim%d' % wim),
-            'datacenter_id': uuid('dc%d' % datacenter),
-            'pop_switch_dpid': pop_dpid,
-            'pop_switch_port': (str(pop_port) if pop_port else
-                                str(int(datacenter) + int(wim) + 1)),
-            # ^  Datacenter router have one port managed by each WIM
-            'wan_service_endpoint_id': id_,
-            # ^  WIM managed router have one port connected to each DC
-            'wan_service_mapping_info': json.dumps(mapping_info)}
-
-
-def processed_port_mapping(wim, datacenter,
-                           num_pairs=1,
-                           pop_dpid='AA:AA:AA:AA:AA:AA:AA:AA',
-                           wan_dpid='BB:BB:BB:BB:BB:BB:BB:BB'):
-    """Emulate the response of the Persistence class, where the records in the
-    data base are grouped by wim and datacenter
-    """
-    return {
-        'wim_id': uuid('wim%d' % wim),
-        'datacenter_id': uuid('dc%d' % datacenter),
-        'pop_wan_mappings': [
-            {'pop_switch_dpid': pop_dpid,
-             'pop_switch_port': wim + 1 + i,
-             'wan_service_endpoint_id':
-                 sha1('dpid-port|%s|%d' % (wan_dpid, datacenter + 1 + i)),
-             'wan_service_mapping_info': {
-                 'mapping_type': 'dpid-port',
-                 'wan_switch_dpid': wan_dpid,
-                 'wan_switch_port': datacenter + 1 + i}}
-            for i in range(num_pairs)
-        ]
-    }
-
-
-def consistent_set(num_wims=NUM_WIMS, num_tenants=NUM_TENANTS,
-                   num_datacenters=NUM_DATACENTERS,
-                   external_ports_config=False):
-    return [
-        {'nfvo_tenants': [tenant(i) for i in range(num_tenants)]},
-        {'wims': [wim(j) for j in range(num_wims)]},
-        {'wim_accounts': [
-            wim_account(j, i)
-            for i in range(num_tenants)
-            for j in range(num_wims)
-        ]},
-        {'wim_nfvo_tenants': [
-            wim_tenant_association(j, i)
-            for i in range(num_tenants)
-            for j in range(num_wims)
-        ]},
-        {'datacenters': [
-            datacenter(k, external_ports_config)
-            for k in range(num_datacenters)
-        ]},
-        {'datacenter_tenants': [
-            datacenter_account(k, i)
-            for i in range(num_tenants)
-            for k in range(num_datacenters)
-        ]},
-        {'tenants_datacenters': [
-            datacenter_tenant_association(k, i)
-            for i in range(num_tenants)
-            for k in range(num_datacenters)
-        ]},
-        {'wim_port_mappings': [
-            (wim_port_mapping(j, k, *_datacenter_to_switch_port(k))
-             if external_ports_config else wim_port_mapping(j, k))
-            for j in range(num_wims)
-            for k in range(num_datacenters)
-        ]},
-    ]
-
-
-def instance_nets(num_datacenters=2, num_links=2, status='BUILD'):
-    """Example of multi-site deploy with N datacenters and M WAN links between
-    them (e.g M = 2 -> back and forth)
-    """
-    return [
-        {'uuid': uuid('net%d%d' % (k, l)),
-         'datacenter_id': uuid('dc%d' % k),
-         'datacenter_tenant_id': uuid('dc-account0%d' % k),
-         'instance_scenario_id': uuid('nsr0'),
-         # ^  instance_scenario_id == NS Record id
-         'sce_net_id': uuid('vld%d' % l),
-         # ^  scenario net id == VLD id
-         'status': status,
-         'vim_net_id': None,
-         'created': True}
-        for k in range(num_datacenters)
-        for l in range(num_links)
-    ]
-
-
-def wim_actions(action='CREATE', status='SCHEDULED',
-                action_id=None, instance=0,
-                wim=0, tenant=0, num_links=1):
-    """Create a list of actions for the WIM,
-
-    Arguments:
-        action: type of action (CREATE) by default
-        wim: WIM fixture index to create actions for
-        tenant: tenant fixture index to create actions for
-        num_links: number of WAN links to be established by each WIM
-    """
-
-    action_id = action_id or 'ACTION-{}'.format(time())
-
-    return [
-        {
-            'action': action,
-            'wim_internal_id': uuid('-wim-net%d%d%d' % (wim, instance, link)),
-            'wim_account_id': uuid('wim-account%d%d' % (tenant, wim)),
-            'instance_action_id': action_id,
-            'item': 'instance_wim_nets',
-            'item_id': uuid('wim-net%d%d%d' % (wim, instance, link)),
-            'status': status,
-            'task_index': link,
-            'created_at': time(),
-            'modified_at': time(),
-            'extra': None
-        }
-        for link in range(num_links)
-    ]
-
-
-def instance_action(tenant=0, instance=0, action_id=None,
-                    num_tasks=1, num_done=0, num_failed=0):
-    action_id = action_id or 'ACTION-{}'.format(time())
-
-    return {
-        'uuid': action_id,
-        'tenant_id': uuid('tenant%d' % tenant),
-        'instance_id': uuid('nsr%d' % instance),
-        'number_tasks': num_tasks,
-        'number_done': num_done,
-        'number_failed': num_failed,
-    }
-
-
-def instance_wim_nets(instance=0, wim=0, num_links=1,
-                      status='SCHEDULED_CREATION'):
-    """Example of multi-site deploy with N wims and M WAN links between
-    them (e.g M = 2 -> back and forth)
-    VIM nets
-    """
-    return [
-        {'uuid': uuid('wim-net%d%d%d' % (wim, instance, l)),
-         'wim_id': uuid('wim%d' % wim),
-         'wim_account_id': uuid('wim-account%d' % wim),
-         'wim_internal_id': uuid('-net%d%d' % (wim, l)),
-         'instance_scenario_id': uuid('nsr%d' % instance),
-         # ^  instance_scenario_id == NS Record id
-         'sce_net_id': uuid('vld%d' % l),
-         # ^  scenario net id == VLD id
-         'status': status,
-         'created': False}
-        for l in range(num_links)
-    ]
-
-
-def instance_vm(instance=0, vim_info=None):
-    vim_info = {'OS-EXT-SRV-ATTR:hypervisor_hostname': 'host%d' % instance}
-    return {
-        'uuid': uuid('vm%d' % instance),
-        'instance_vnf_id': uuid('vnf%d' % instance),
-        'vm_id': uuid('vm%d' % instance),
-        'vim_vm_id': uuid('vm%d' % instance),
-        'status': 'ACTIVE',
-        'vim_info': vim_info,
-    }
-
-
-def instance_interface(instance=0, interface=0, datacenter=0, link=0):
-    return {
-        'uuid': uuid('interface%d%d' % (instance, interface)),
-        'instance_vm_id': uuid('vm%d' % instance),
-        'instance_net_id': uuid('net%d%d' % (datacenter, link)),
-        'interface_id': uuid('iface%d' % interface),
-        'type': 'external',
-        'vlan': 3
-    }
diff --git a/osm_ro/wim/tests/test_actions.py b/osm_ro/wim/tests/test_actions.py
deleted file mode 100644 (file)
index cee3c96..0000000
+++ /dev/null
@@ -1,454 +0,0 @@
-# -*- coding: utf-8 -*-
-##
-# Copyright 2018 University of Bristol - High Performance Networks Research
-# Group
-# All Rights Reserved.
-#
-# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
-# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: <highperformance-networks@bristol.ac.uk>
-#
-# Neither the name of the University of Bristol nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# This work has been performed in the context of DCMS UK 5G Testbeds
-# & Trials Programme and in the framework of the Metro-Haul project -
-# funded by the European Commission under Grant number 761727 through the
-# Horizon 2020 and 5G-PPP programmes.
-##
-# pylint: disable=E1101
-
-from __future__ import unicode_literals, print_function
-
-import json
-import unittest
-from time import time
-
-from mock import MagicMock, patch
-
-from . import fixtures as eg
-from ...tests.db_helpers import (
-    TestCaseWithDatabasePerTest,
-    disable_foreign_keys,
-    uuid,
-)
-from ..persistence import WimPersistence, preprocess_record
-from ..wan_link_actions import WanLinkCreate, WanLinkDelete
-from ..wimconn import WimConnectorError
-
-
-class TestActionsWithDb(TestCaseWithDatabasePerTest):
-    def setUp(self):
-        super(TestActionsWithDb, self).setUp()
-        self.persist = WimPersistence(self.db)
-        self.connector = MagicMock()
-        self.ovim = MagicMock()
-
-
-class TestCreate(TestActionsWithDb):
-    @disable_foreign_keys
-    def test_process__instance_nets_on_build(self):
-        # Given we want 1 WAN link between 2 datacenters
-        # and the local network in each datacenter is still being built
-        wan_link = eg.instance_wim_nets()
-        instance_nets = eg.instance_nets(num_datacenters=2, num_links=1)
-        for net in instance_nets:
-            net['status'] = 'BUILD'
-        self.populate([{'instance_nets': instance_nets,
-                        'instance_wim_nets': wan_link}])
-
-        # When we try to process a CREATE action that refers to the same
-        # instance_scenario_id and sce_net_id
-        now = time()
-        action = WanLinkCreate(eg.wim_actions('CREATE')[0])
-        action.instance_scenario_id = instance_nets[0]['instance_scenario_id']
-        action.sce_net_id = instance_nets[0]['sce_net_id']
-        # -- ensure it is in the database for updates --> #
-        action_record = action.as_record()
-        action_record['extra'] = json.dumps(action_record['extra'])
-        self.populate([{'vim_wim_actions': action_record}])
-        # <-- #
-        action.process(self.connector, self.persist, self.ovim)
-
-        # Then the action should be defered
-        assert action.is_scheduled
-        self.assertEqual(action.extra['attempts'], 1)
-        self.assertGreater(action.extra['last_attempted_at'], now)
-
-    @disable_foreign_keys
-    def test_process__instance_nets_on_error(self):
-        # Given we want 1 WAN link between 2 datacenters
-        # and at least one local network is in a not good state (error, or
-        # being deleted)
-        instance_nets = eg.instance_nets(num_datacenters=2, num_links=1)
-        instance_nets[1]['status'] = 'SCHEDULED_DELETION'
-        wan_link = eg.instance_wim_nets()
-        self.populate([{'instance_nets': instance_nets,
-                        'instance_wim_nets': wan_link}])
-
-        # When we try to process a CREATE action that refers to the same
-        # instance_scenario_id and sce_net_id
-        action = WanLinkCreate(eg.wim_actions('CREATE')[0])
-        action.instance_scenario_id = instance_nets[0]['instance_scenario_id']
-        action.sce_net_id = instance_nets[0]['sce_net_id']
-        # -- ensure it is in the database for updates --> #
-        action_record = action.as_record()
-        action_record['extra'] = json.dumps(action_record['extra'])
-        self.populate([{'vim_wim_actions': action_record}])
-        # <-- #
-        action.process(self.connector, self.persist, self.ovim)
-
-        # Then the action should fail
-        assert action.is_failed
-        self.assertIn('issue with the local networks', action.error_msg)
-        self.assertIn('SCHEDULED_DELETION', action.error_msg)
-
-    def prepare_create__rules(self):
-        db_state = eg.consistent_set(num_wims=1, num_tenants=1,
-                                     num_datacenters=2,
-                                     external_ports_config=True)
-
-        instance_nets = eg.instance_nets(num_datacenters=2, num_links=1,
-                                         status='ACTIVE')
-        for i, net in enumerate(instance_nets):
-            net['vim_info'] = {}
-            net['vim_info']['provider:physical_network'] = 'provider'
-            net['vim_info']['encapsulation_type'] = 'vlan'
-            net['vim_info']['encapsulation_id'] = i
-            net['sdn_net_id'] = uuid('sdn-net%d' % i)
-
-        instance_action = eg.instance_action(action_id='ACTION-000')
-
-        db_state += [
-            {'instance_wim_nets': eg.instance_wim_nets()},
-            {'instance_nets': [preprocess_record(r) for r in instance_nets]},
-            {'instance_actions': instance_action}]
-
-        action = WanLinkCreate(
-            eg.wim_actions('CREATE', action_id='ACTION-000')[0])
-        # --> ensure it is in the database for updates --> #
-        action_record = action.as_record()
-        action_record['extra'] = json.dumps(action_record['extra'])
-        db_state += [{'vim_wim_actions': action_record}]
-
-        return db_state, action
-
-    @disable_foreign_keys
-    def test_process__rules(self):
-        # Given we want 1 WAN link between 2 datacenters
-        # and the local network in each datacenter is already created
-        db_state, action = self.prepare_create__rules()
-        self.populate(db_state)
-
-        instance_action = self.persist.get_by_uuid(
-            'instance_actions', action.instance_action_id)
-        number_done = instance_action['number_done']
-        number_failed = instance_action['number_failed']
-
-        # If the connector works fine
-        with patch.object(self.connector, 'create_connectivity_service',
-                          lambda *_, **__: (uuid('random-id'), None)):
-            # When we try to process a CREATE action that refers to the same
-            # instance_scenario_id and sce_net_id
-            action.process(self.connector, self.persist, self.ovim)
-
-        # Then the action should be succeeded
-        db_action = self.persist.query_one('vim_wim_actions', WHERE={
-            'instance_action_id': action.instance_action_id,
-            'task_index': action.task_index})
-        self.assertEqual(db_action['status'], 'DONE')
-
-        instance_action = self.persist.get_by_uuid(
-            'instance_actions', action.instance_action_id)
-        self.assertEqual(instance_action['number_done'], number_done + 1)
-        self.assertEqual(instance_action['number_failed'], number_failed)
-
-    @disable_foreign_keys
-    def test_process__rules_fail(self):
-        # Given we want 1 WAN link between 2 datacenters
-        # and the local network in each datacenter is already created
-        db_state, action = self.prepare_create__rules()
-        self.populate(db_state)
-
-        instance_action = self.persist.get_by_uuid(
-            'instance_actions', action.instance_action_id)
-        number_done = instance_action['number_done']
-        number_failed = instance_action['number_failed']
-
-        # If the connector raises an error
-        with patch.object(self.connector, 'create_connectivity_service',
-                          MagicMock(side_effect=WimConnectorError('foobar'))):
-            # When we try to process a CREATE action that refers to the same
-            # instance_scenario_id and sce_net_id
-            action.process(self.connector, self.persist, self.ovim)
-
-        # Then the action should be fail
-        db_action = self.persist.query_one('vim_wim_actions', WHERE={
-            'instance_action_id': action.instance_action_id,
-            'task_index': action.task_index})
-        self.assertEqual(db_action['status'], 'FAILED')
-
-        instance_action = self.persist.get_by_uuid(
-            'instance_actions', action.instance_action_id)
-        self.assertEqual(instance_action['number_done'], number_done)
-        self.assertEqual(instance_action['number_failed'], number_failed + 1)
-
-    def prepare_create__sdn(self):
-        db_state = eg.consistent_set(num_wims=1, num_tenants=1,
-                                     num_datacenters=2,
-                                     external_ports_config=False)
-
-        # Make sure all port_mappings are predictable
-        switch = 'AA:AA:AA:AA:AA:AA:AA:AA'
-        port = 1
-        port_mappings = next(r['wim_port_mappings']
-                             for r in db_state if 'wim_port_mappings' in r)
-        for mapping in port_mappings:
-            mapping['pop_switch_dpid'] = switch
-            mapping['pop_switch_port'] = port
-
-        instance_action = eg.instance_action(action_id='ACTION-000')
-        instance_nets = eg.instance_nets(num_datacenters=2, num_links=1,
-                                         status='ACTIVE')
-        for i, net in enumerate(instance_nets):
-            net['sdn_net_id'] = uuid('sdn-net%d' % i)
-
-        db_state += [{'instance_nets': instance_nets},
-                     {'instance_wim_nets': eg.instance_wim_nets()},
-                     {'instance_actions': instance_action}]
-
-        action = WanLinkCreate(
-            eg.wim_actions('CREATE', action_id='ACTION-000')[0])
-        # --> ensure it is in the database for updates --> #
-        action_record = action.as_record()
-        action_record['extra'] = json.dumps(action_record['extra'])
-        db_state += [{'vim_wim_actions': action_record}]
-
-        ovim_patch = patch.object(
-            self.ovim, 'get_ports', MagicMock(return_value=[{
-                'switch_dpid': switch,
-                'switch_port': port,
-            }]))
-
-        return db_state, action, ovim_patch
-
-    @disable_foreign_keys
-    def test_process__sdn(self):
-        # Given we want 1 WAN link between 2 datacenters
-        # and the local network in each datacenter is already created
-        db_state, action, ovim_patch = self.prepare_create__sdn()
-        self.populate(db_state)
-
-        instance_action = self.persist.get_by_uuid(
-            'instance_actions', action.instance_action_id)
-        number_done = instance_action['number_done']
-        number_failed = instance_action['number_failed']
-
-        connector_patch = patch.object(
-            self.connector, 'create_connectivity_service',
-            lambda *_, **__: (uuid('random-id'), None))
-
-        # If the connector works fine
-        with connector_patch, ovim_patch:
-            # When we try to process a CREATE action that refers to the same
-            # instance_scenario_id and sce_net_id
-            action.process(self.connector, self.persist, self.ovim)
-
-        # Then the action should be succeeded
-        db_action = self.persist.query_one('vim_wim_actions', WHERE={
-            'instance_action_id': action.instance_action_id,
-            'task_index': action.task_index})
-        self.assertEqual(db_action['status'], 'DONE')
-
-        instance_action = self.persist.get_by_uuid(
-            'instance_actions', action.instance_action_id)
-        self.assertEqual(instance_action['number_done'], number_done + 1)
-        self.assertEqual(instance_action['number_failed'], number_failed)
-
-    @disable_foreign_keys
-    def test_process__sdn_fail(self):
-        # Given we want 1 WAN link between 2 datacenters
-        # and the local network in each datacenter is already created
-        db_state, action, ovim_patch = self.prepare_create__sdn()
-        self.populate(db_state)
-
-        instance_action = self.persist.get_by_uuid(
-            'instance_actions', action.instance_action_id)
-        number_done = instance_action['number_done']
-        number_failed = instance_action['number_failed']
-
-        connector_patch = patch.object(
-            self.connector, 'create_connectivity_service',
-            MagicMock(side_effect=WimConnectorError('foobar')))
-
-        # If the connector throws an error
-        with connector_patch, ovim_patch:
-            # When we try to process a CREATE action that refers to the same
-            # instance_scenario_id and sce_net_id
-            action.process(self.connector, self.persist, self.ovim)
-
-        # Then the action should be fail
-        db_action = self.persist.query_one('vim_wim_actions', WHERE={
-            'instance_action_id': action.instance_action_id,
-            'task_index': action.task_index})
-        self.assertEqual(db_action['status'], 'FAILED')
-
-        instance_action = self.persist.get_by_uuid(
-            'instance_actions', action.instance_action_id)
-        self.assertEqual(instance_action['number_done'], number_done)
-        self.assertEqual(instance_action['number_failed'], number_failed + 1)
-
-
-class TestDelete(TestActionsWithDb):
-    @disable_foreign_keys
-    def test_process__no_internal_id(self):
-        # Given no WAN link was created yet,
-        # when we try to process a DELETE action, with no wim_internal_id
-        action = WanLinkDelete(eg.wim_actions('DELETE')[0])
-        action.wim_internal_id = None
-        # -- ensure it is in the database for updates --> #
-        action_record = action.as_record()
-        action_record['extra'] = json.dumps(action_record['extra'])
-        self.populate([{'vim_wim_actions': action_record,
-                        'instance_wim_nets': eg.instance_wim_nets()}])
-        # <-- #
-        action.process(self.connector, self.persist, self.ovim)
-
-        # Then the action should succeed
-        assert action.is_done
-
-    def prepare_delete(self):
-        db_state = eg.consistent_set(num_wims=1, num_tenants=1,
-                                     num_datacenters=2,
-                                     external_ports_config=True)
-
-        instance_nets = eg.instance_nets(num_datacenters=2, num_links=1,
-                                         status='ACTIVE')
-        for i, net in enumerate(instance_nets):
-            net['vim_info'] = {}
-            net['vim_info']['provider:physical_network'] = 'provider'
-            net['vim_info']['encapsulation_type'] = 'vlan'
-            net['vim_info']['encapsulation_id'] = i
-            net['sdn_net_id'] = uuid('sdn-net%d' % i)
-
-        instance_action = eg.instance_action(action_id='ACTION-000')
-
-        db_state += [
-            {'instance_wim_nets': eg.instance_wim_nets()},
-            {'instance_nets': [preprocess_record(r) for r in instance_nets]},
-            {'instance_actions': instance_action}]
-
-        action = WanLinkDelete(
-            eg.wim_actions('DELETE', action_id='ACTION-000')[0])
-        # --> ensure it is in the database for updates --> #
-        action_record = action.as_record()
-        action_record['extra'] = json.dumps(action_record['extra'])
-        db_state += [{'vim_wim_actions': action_record}]
-
-        return db_state, action
-
-    @disable_foreign_keys
-    def test_process(self):
-        # Given we want to delete 1 WAN link between 2 datacenters
-        db_state, action = self.prepare_delete()
-        self.populate(db_state)
-
-        instance_action = self.persist.get_by_uuid(
-            'instance_actions', action.instance_action_id)
-        number_done = instance_action['number_done']
-        number_failed = instance_action['number_failed']
-
-        connector_patch = patch.object(
-            self.connector, 'delete_connectivity_service')
-
-        # If the connector works fine
-        with connector_patch:
-            # When we try to process a DELETE action that refers to the same
-            # instance_scenario_id and sce_net_id
-            action.process(self.connector, self.persist, self.ovim)
-
-        # Then the action should be succeeded
-        db_action = self.persist.query_one('vim_wim_actions', WHERE={
-            'instance_action_id': action.instance_action_id,
-            'task_index': action.task_index})
-        self.assertEqual(db_action['status'], 'DONE')
-
-        instance_action = self.persist.get_by_uuid(
-            'instance_actions', action.instance_action_id)
-        self.assertEqual(instance_action['number_done'], number_done + 1)
-        self.assertEqual(instance_action['number_failed'], number_failed)
-
-    @disable_foreign_keys
-    def test_process__wan_link_error(self):
-        # Given we have a delete action that targets a wan link with an error
-        db_state, action = self.prepare_delete()
-        wan_link = [tables for tables in db_state
-                    if tables.get('instance_wim_nets')][0]['instance_wim_nets']
-        from pprint import pprint
-        pprint(wan_link)
-        wan_link[0]['status'] = 'ERROR'
-        self.populate(db_state)
-
-        # When we try to process it
-        action.process(self.connector, self.persist, self.ovim)
-
-        # Then it should fail
-        assert action.is_failed
-
-    def create_action(self):
-        action = WanLinkCreate(
-            eg.wim_actions('CREATE', action_id='ACTION-000')[0])
-        # --> ensure it is in the database for updates --> #
-        action_record = action.as_record()
-        action_record['extra'] = json.dumps(action_record['extra'])
-        self.populate([{'vim_wim_actions': action_record}])
-
-        return action
-
-    @disable_foreign_keys
-    def test_create_and_delete(self):
-        # Given a CREATE action was well succeeded
-        db_state, delete_action = self.prepare_delete()
-        self.populate(db_state)
-
-        delete_action.save(self.persist, task_index=1)
-        create_action = self.create_action()
-
-        connector_patch = patch.multiple(
-            self.connector,
-            delete_connectivity_service=MagicMock(),
-            create_connectivity_service=(
-                lambda *_, **__: (uuid('random-id'), None)))
-
-        with connector_patch:  # , ovim_patch:
-            create_action.process(self.connector, self.persist, self.ovim)
-
-        # When we try to process a CREATE action that refers to the same
-        # instance_scenario_id and sce_net_id
-        with connector_patch:
-            delete_action.process(self.connector, self.persist, self.ovim)
-
-        # Then the DELETE action should be successful
-        db_action = self.persist.query_one('vim_wim_actions', WHERE={
-            'instance_action_id': delete_action.instance_action_id,
-            'task_index': delete_action.task_index})
-        self.assertEqual(db_action['status'], 'DONE')
-
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/osm_ro/wim/tests/test_engine.py b/osm_ro/wim/tests/test_engine.py
deleted file mode 100644 (file)
index 9bb7bca..0000000
+++ /dev/null
@@ -1,179 +0,0 @@
-# -*- coding: utf-8 -*-
-##
-# Copyright 2018 University of Bristol - High Performance Networks Research
-# Group
-# All Rights Reserved.
-#
-# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
-# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: <highperformance-networks@bristol.ac.uk>
-#
-# Neither the name of the University of Bristol nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# This work has been performed in the context of DCMS UK 5G Testbeds
-# & Trials Programme and in the framework of the Metro-Haul project -
-# funded by the European Commission under Grant number 761727 through the
-# Horizon 2020 and 5G-PPP programmes.
-##
-
-from __future__ import unicode_literals
-
-import unittest
-
-from mock import MagicMock
-
-from . import fixtures as eg
-from ...tests.db_helpers import TestCaseWithDatabasePerTest, uuid
-from ..errors import NoWimConnectedToDatacenters
-from ..engine import WimEngine
-from ..persistence import WimPersistence
-
-
-class TestWimEngineDbMethods(TestCaseWithDatabasePerTest):
-    def setUp(self):
-        super(TestWimEngineDbMethods, self).setUp()
-        self.persist = WimPersistence(self.db)
-        self.engine = WimEngine(persistence=self.persist)
-        self.addCleanup(self.engine.stop_threads)
-
-    def populate(self, seeds=None):
-        super(TestWimEngineDbMethods, self).populate(
-            seeds or eg.consistent_set())
-
-    def test_find_common_wims(self):
-        # Given we have 2 WIM, 3 datacenters, but just 1 of the WIMs have
-        # access to them
-        self.populate([{'nfvo_tenants': [eg.tenant(0)]}] +
-                      eg.wim_set(0, 0) +
-                      eg.wim_set(1, 0) +
-                      eg.datacenter_set(0, 0) +
-                      eg.datacenter_set(1, 0) +
-                      eg.datacenter_set(2, 0) +
-                      [{'wim_port_mappings': [
-                          eg.wim_port_mapping(0, 0),
-                          eg.wim_port_mapping(0, 1),
-                          eg.wim_port_mapping(0, 2)]}])
-
-        # When we retrieve the wims interconnecting some datacenters
-        wim_ids = self.engine.find_common_wims(
-            [uuid('dc0'), uuid('dc1'), uuid('dc2')], tenant='tenant0')
-
-        # Then we should have just the first wim
-        self.assertEqual(len(wim_ids), 1)
-        self.assertEqual(wim_ids[0], uuid('wim0'))
-
-    def test_find_common_wims_multiple(self):
-        # Given we have 2 WIM, 3 datacenters, and all the WIMs have access to
-        # all datacenters
-        self.populate([{'nfvo_tenants': [eg.tenant(0)]}] +
-                      eg.wim_set(0, 0) +
-                      eg.wim_set(1, 0) +
-                      eg.datacenter_set(0, 0) +
-                      eg.datacenter_set(1, 0) +
-                      eg.datacenter_set(2, 0) +
-                      [{'wim_port_mappings': [
-                          eg.wim_port_mapping(0, 0),
-                          eg.wim_port_mapping(0, 1),
-                          eg.wim_port_mapping(0, 2),
-                          eg.wim_port_mapping(1, 0),
-                          eg.wim_port_mapping(1, 1),
-                          eg.wim_port_mapping(1, 2)]}])
-
-        # When we retrieve the wims interconnecting tree datacenters
-        wim_ids = self.engine.find_common_wims(
-            [uuid('dc0'), uuid('dc1'), uuid('dc2')], tenant='tenant0')
-
-        # Then we should have all the wims
-        self.assertEqual(len(wim_ids), 2)
-        self.assertItemsEqual(wim_ids, [uuid('wim0'), uuid('wim1')])
-
-    def test_find_common_wim(self):
-        # Given we have 1 WIM, 3 datacenters but the WIM have access to just 2
-        # of them
-        self.populate([{'nfvo_tenants': [eg.tenant(0)]}] +
-                      eg.wim_set(0, 0) +
-                      eg.datacenter_set(0, 0) +
-                      eg.datacenter_set(1, 0) +
-                      eg.datacenter_set(2, 0) +
-                      [{'wim_port_mappings': [
-                          eg.wim_port_mapping(0, 0),
-                          eg.wim_port_mapping(0, 1)]}])
-
-        # When we retrieve the common wim for the 2 datacenter that are
-        # interconnected
-        wim_id = self.engine.find_common_wim(
-            [uuid('dc0'), uuid('dc1')], tenant='tenant0')
-
-        # Then we should find the wim
-        self.assertEqual(wim_id, uuid('wim0'))
-
-        # When we try to retrieve the common wim for the all the datacenters
-        # Then a NoWimConnectedToDatacenters exception should be raised
-        with self.assertRaises(NoWimConnectedToDatacenters):
-            self.engine.find_common_wim(
-                [uuid('dc0'), uuid('dc1'), uuid('dc2')], tenant='tenant0')
-
-    def test_find_common_wim__different_tenants(self):
-        # Given we have 1 WIM and 2 datacenters connected but the WIMs don't
-        # belong to the tenant we have access to...
-        self.populate([{'nfvo_tenants': [eg.tenant(0), eg.tenant(1)]}] +
-                      eg.wim_set(0, 0) +
-                      eg.datacenter_set(0, 0) +
-                      eg.datacenter_set(1, 0) +
-                      [{'wim_port_mappings': [
-                          eg.wim_port_mapping(0, 0),
-                          eg.wim_port_mapping(0, 1)]}])
-
-        # When we retrieve the common wim for the 2 datacenter that are
-        # interconnected, but using another tenant,
-        # Then we should get an exception
-        with self.assertRaises(NoWimConnectedToDatacenters):
-            self.engine.find_common_wim(
-                [uuid('dc0'), uuid('dc1')], tenant='tenant1')
-
-
-class TestWimEngine(unittest.TestCase):
-    def test_derive_wan_link(self):
-        # Given we have 2 datacenters connected by the same WIM, with port
-        # mappings registered
-        mappings = [eg.processed_port_mapping(0, 0),
-                    eg.processed_port_mapping(0, 1)]
-        persist = MagicMock(
-            get_wim_port_mappings=MagicMock(return_value=mappings))
-
-        engine = WimEngine(persistence=persist)
-        self.addCleanup(engine.stop_threads)
-
-        # When we receive a list of 4 instance nets, representing
-        # 2 VLDs connecting 2 datacenters each
-        instance_nets = eg.instance_nets(2, 2)
-        wan_links = engine.derive_wan_links({}, instance_nets, uuid('tenant0'))
-
-        # Then we should derive 2 wan_links with the same instance_scenario_id
-        # and different scenario_network_id
-        self.assertEqual(len(wan_links), 2)
-        for link in wan_links:
-            self.assertEqual(link['instance_scenario_id'], uuid('nsr0'))
-        # Each VLD needs a network to be created in each datacenter
-        self.assertItemsEqual([l['sce_net_id'] for l in wan_links],
-                              [uuid('vld0'), uuid('vld1')])
-
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/osm_ro/wim/tests/test_http_handler.py b/osm_ro/wim/tests/test_http_handler.py
deleted file mode 100644 (file)
index 428b1ce..0000000
+++ /dev/null
@@ -1,575 +0,0 @@
-# -*- coding: utf-8 -*-
-##
-# Copyright 2018 University of Bristol - High Performance Networks Research
-# Group
-# All Rights Reserved.
-#
-# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
-# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: <highperformance-networks@bristol.ac.uk>
-#
-# Neither the name of the University of Bristol nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# This work has been performed in the context of DCMS UK 5G Testbeds
-# & Trials Programme and in the framework of the Metro-Haul project -
-# funded by the European Commission under Grant number 761727 through the
-# Horizon 2020 and 5G-PPP programmes.
-##
-
-from __future__ import unicode_literals
-
-import unittest
-
-import bottle
-from mock import MagicMock, patch
-from webtest import TestApp
-
-from . import fixtures as eg  # "examples"
-from ...http_tools.errors import Conflict, Not_Found
-from ...tests.db_helpers import TestCaseWithDatabasePerTest, uuid
-from ...utils import merge_dicts
-from ..http_handler import WimHandler
-
-OK = 200
-
-
-@patch('osm_ro.wim.wim_thread.CONNECTORS', MagicMock())  # Avoid external calls
-@patch('osm_ro.wim.wim_thread.WimThread.start', MagicMock())  # Avoid running
-class TestHttpHandler(TestCaseWithDatabasePerTest):
-    def setUp(self):
-        super(TestHttpHandler, self).setUp()
-        bottle.debug(True)
-        handler = WimHandler(db=self.db)
-        self.engine = handler.engine
-        self.addCleanup(self.engine.stop_threads)
-        self.app = TestApp(handler.wsgi_app)
-
-    def populate(self, seeds=None):
-        super(TestHttpHandler, self).populate(seeds or eg.consistent_set())
-
-    def test_list_wims(self):
-        # Given some wims are registered in the database
-        self.populate()
-        # when a GET /<tenant_id>/wims request arrives
-        tenant_id = uuid('tenant0')
-        response = self.app.get('/{}/wims'.format(tenant_id))
-
-        # then the request should be well succeeded
-        self.assertEqual(response.status_code, OK)
-        # and all the registered wims should be present
-        retrieved_wims = {v['name']: v for v in response.json['wims']}
-        for name in retrieved_wims:
-            identifier = int(name.replace('wim', ''))
-            self.assertDictContainsSubset(
-                eg.wim(identifier), retrieved_wims[name])
-
-    def test_show_wim(self):
-        # Given some wims are registered in the database
-        self.populate()
-        # when a GET /<tenant_id>/wims/<wim_id> request arrives
-        tenant_id = uuid('tenant0')
-        wim_id = uuid('wim1')
-        response = self.app.get('/{}/wims/{}'.format(tenant_id, wim_id))
-
-        # then the request should be well succeeded
-        self.assertEqual(response.status_code, OK)
-        # and the registered wim (wim1) should be present
-        self.assertDictContainsSubset(eg.wim(1), response.json['wim'])
-        # Moreover, it also works with tenant_id =  all
-        response = self.app.get('/any/wims/{}'.format(wim_id))
-        self.assertEqual(response.status_code, OK)
-        self.assertDictContainsSubset(eg.wim(1), response.json['wim'])
-
-    def test_show_wim__wim_doesnt_exists(self):
-        # Given wim_id does not refer to any already registered wim
-        self.populate()
-        # when a GET /<tenant_id>/wims/<wim_id> request arrives
-        tenant_id = uuid('tenant0')
-        wim_id = uuid('wim999')
-        response = self.app.get(
-            '/{}/wims/{}'.format(tenant_id, wim_id),
-            expect_errors=True)
-
-        # then the result should not be well succeeded
-        self.assertEqual(response.status_code, Not_Found)
-
-    def test_show_wim__tenant_doesnt_exists(self):
-        # Given wim_id does not refer to any already registered wim
-        self.populate()
-        # when a GET /<tenant_id>/wims/<wim_id> request arrives
-        tenant_id = uuid('tenant999')
-        wim_id = uuid('wim0')
-        response = self.app.get(
-            '/{}/wims/{}'.format(tenant_id, wim_id),
-            expect_errors=True)
-
-        # then the result should not be well succeeded
-        self.assertEqual(response.status_code, Not_Found)
-
-    def test_edit_wim(self):
-        # Given a WIM exists in the database
-        self.populate()
-        # when a PUT /wims/<wim_id> request arrives
-        wim_id = uuid('wim1')
-        response = self.app.put_json('/wims/{}'.format(wim_id), {
-            'wim': {'name': 'My-New-Name'}})
-
-        # then the request should be well succeeded
-        self.assertEqual(response.status_code, OK)
-        # and the registered wim (wim1) should be present
-        self.assertDictContainsSubset(
-            merge_dicts(eg.wim(1), name='My-New-Name'),
-            response.json['wim'])
-
-    def test_edit_wim__port_mappings(self):
-        # Given a WIM exists in the database
-        self.populate()
-        # when a PUT /wims/<wim_id> request arrives
-        wim_id = uuid('wim1')
-        response = self.app.put_json(
-            '/wims/{}'.format(wim_id), {
-                'wim': dict(
-                    name='My-New-Name',
-                    config={'wim_port_mapping': [{
-                        'datacenter_name': 'dc0',
-                        'pop_wan_mappings': [{
-                            'pop_switch_dpid': '00:AA:11:BB:22:CC:33:DD',
-                            'pop_switch_port': 1,
-                            'wan_service_mapping_info': {
-                                'mapping_type': 'dpid-port',
-                                'wan_switch_dpid': 'BB:BB:BB:BB:BB:BB:BB:0A',
-                                'wan_switch_port': 1
-                            }
-                        }]}]
-                    }
-                )
-            }
-        )
-
-        # then the request should be well succeeded
-        self.assertEqual(response.status_code, OK)
-        # and the registered wim (wim1) should be present
-        self.assertDictContainsSubset(
-            merge_dicts(eg.wim(1), name='My-New-Name'),
-            response.json['wim'])
-        # and the port mappings hould be updated
-        mappings = response.json['wim']['config']['wim_port_mapping']
-        self.assertEqual(len(mappings), 1)
-        self.assertEqual(
-            mappings[0]['pop_wan_mappings'][0]['pop_switch_dpid'],
-            '00:AA:11:BB:22:CC:33:DD')
-
-    def test_delete_wim(self):
-        # Given a WIM exists in the database
-        self.populate()
-        num_accounts = self.count('wim_accounts')
-        num_associations = self.count('wim_nfvo_tenants')
-        num_mappings = self.count('wim_port_mappings')
-
-        with self.engine.threads_running():
-            num_threads = len(self.engine.threads)
-            # when a DELETE /wims/<wim_id> request arrives
-            wim_id = uuid('wim1')
-            response = self.app.delete('/wims/{}'.format(wim_id))
-            num_threads_after = len(self.engine.threads)
-
-        # then the request should be well succeeded
-        self.assertEqual(response.status_code, OK)
-        self.assertIn('deleted', response.json['result'])
-        # and the registered wim1 should be deleted
-        response = self.app.get(
-            '/any/wims/{}'.format(wim_id),
-            expect_errors=True)
-        self.assertEqual(response.status_code, Not_Found)
-        # and all the dependent records in other tables should be deleted:
-        # wim_accounts, wim_nfvo_tenants, wim_port_mappings
-        self.assertEqual(self.count('wim_nfvo_tenants'),
-                         num_associations - eg.NUM_TENANTS)
-        self.assertLess(self.count('wim_port_mappings'), num_mappings)
-        self.assertEqual(self.count('wim_accounts'),
-                         num_accounts - eg.NUM_TENANTS)
-        # And the threads associated with the wim accounts should be stopped
-        self.assertEqual(num_threads_after, num_threads - eg.NUM_TENANTS)
-
-    def test_create_wim(self):
-        # Given no WIM exists yet
-        # when a POST /wims request arrives with the right payload
-        response = self.app.post_json('/wims', {'wim': eg.wim(999)})
-
-        # then the request should be well succeeded
-        self.assertEqual(response.status_code, OK)
-        self.assertEqual(response.json['wim']['name'], 'wim999')
-
-    def test_create_wim__port_mappings(self):
-        self.populate()
-        # when a POST /wims request arrives with the right payload
-        response = self.app.post_json(
-            '/wims', {
-                'wim': merge_dicts(
-                    eg.wim(999),
-                    config={'wim_port_mapping': [{
-                        'datacenter_name': 'dc0',
-                        'pop_wan_mappings': [{
-                            'pop_switch_dpid': 'AA:AA:AA:AA:AA:AA:AA:01',
-                            'pop_switch_port': 1,
-                            'wan_service_mapping_info': {
-                                'mapping_type': 'dpid-port',
-                                'wan_switch_dpid': 'BB:BB:BB:BB:BB:BB:BB:01',
-                                'wan_switch_port': 1
-                            }
-                        }]}]
-                    }
-                )
-            }
-        )
-
-        # then the request should be well succeeded
-        self.assertEqual(response.status_code, OK)
-        self.assertEqual(response.json['wim']['name'], 'wim999')
-        self.assertEqual(
-            len(response.json['wim']['config']['wim_port_mapping']), 1)
-
-    def test_create_wim_account(self):
-        # Given a WIM and a NFVO tenant exist but are not associated
-        self.populate([{'wims': [eg.wim(0)]},
-                       {'nfvo_tenants': [eg.tenant(0)]}])
-
-        with self.engine.threads_running():
-            num_threads = len(self.engine.threads)
-            # when a POST /<tenant_id>/wims/<wim_id> arrives
-            response = self.app.post_json(
-                '/{}/wims/{}'.format(uuid('tenant0'), uuid('wim0')),
-                {'wim_account': eg.wim_account(0, 0)})
-
-            num_threads_after = len(self.engine.threads)
-
-        # then a new thread should be created
-        self.assertEqual(num_threads_after, num_threads + 1)
-
-        # and the request should be well succeeded
-        self.assertEqual(response.status_code, OK)
-        self.assertEqual(response.json['wim_account']['name'], 'wim-account00')
-
-        # and a new association record should be created
-        association = self.db.get_rows(FROM='wim_nfvo_tenants')
-        assert association
-        self.assertEqual(len(association), 1)
-        self.assertEqual(association[0]['wim_id'], uuid('wim0'))
-        self.assertEqual(association[0]['nfvo_tenant_id'], uuid('tenant0'))
-        self.assertEqual(association[0]['wim_account_id'],
-                         response.json['wim_account']['uuid'])
-
-    def test_create_wim_account__existing_account(self):
-        # Given a WIM, a WIM account and a NFVO tenants exist
-        # But the NFVO and the WIM are not associated
-        self.populate([
-            {'wims': [eg.wim(0)]},
-            {'nfvo_tenants': [eg.tenant(0)]},
-            {'wim_accounts': [eg.wim_account(0, 0)]}])
-
-        # when a POST /<tenant_id>/wims/<wim_id> arrives
-        # and it refers to an existing wim account
-        response = self.app.post_json(
-            '/{}/wims/{}'.format(uuid('tenant0'), uuid('wim0')),
-            {'wim_account': {'name': 'wim-account00'}})
-
-        # then the request should be well succeeded
-        self.assertEqual(response.status_code, OK)
-        # and the association should be created
-        association = self.db.get_rows(
-            FROM='wim_nfvo_tenants',
-            WHERE={'wim_id': uuid('wim0'),
-                   'nfvo_tenant_id': uuid('tenant0')})
-        assert association
-        self.assertEqual(len(association), 1)
-        # but no new wim_account should be created
-        wim_accounts = self.db.get_rows(FROM='wim_accounts')
-        self.assertEqual(len(wim_accounts), 1)
-        self.assertEqual(wim_accounts[0]['name'], 'wim-account00')
-
-    def test_create_wim_account__existing_account__differing(self):
-        # Given a WIM, a WIM account and a NFVO tenants exist
-        # But the NFVO and the WIM are not associated
-        self.populate([
-            {'wims': [eg.wim(0)]},
-            {'nfvo_tenants': [eg.tenant(0)]},
-            {'wim_accounts': [eg.wim_account(0, 0)]}])
-
-        # when a POST /<tenant_id>/wims/<wim_id> arrives
-        # and it refers to an existing wim account,
-        # but with different fields
-        response = self.app.post_json(
-            '/{}/wims/{}'.format(uuid('tenant0'), uuid('wim0')), {
-                'wim_account': {
-                    'name': 'wim-account00',
-                    'user': 'john',
-                    'password': 'abc123'}},
-            expect_errors=True)
-
-        # then the request should not be well succeeded
-        self.assertEqual(response.status_code, Conflict)
-        # some useful message should be displayed
-        response.mustcontain('attempt to overwrite', 'user', 'password')
-        # and the association should not be created
-        association = self.db.get_rows(
-            FROM='wim_nfvo_tenants',
-            WHERE={'wim_id': uuid('wim0'),
-                   'nfvo_tenant_id': uuid('tenant0')})
-        assert not association
-
-    def test_create_wim_account__association_already_exists(self):
-        # Given a WIM, a WIM account and a NFVO tenants exist
-        # and are correctly associated
-        self.populate()
-        num_assoc_before = self.count('wim_nfvo_tenants')
-
-        # when a POST /<tenant_id>/wims/<wim_id> arrives trying to connect a
-        # WIM and a tenant for the second time
-        response = self.app.post_json(
-            '/{}/wims/{}'.format(uuid('tenant0'), uuid('wim0')), {
-                'wim_account': {
-                    'user': 'user999',
-                    'password': 'password999'}},
-            expect_errors=True)
-
-        # then the request should not be well succeeded
-        self.assertEqual(response.status_code, Conflict)
-        # the message should be useful
-        response.mustcontain('There is already', uuid('wim0'), uuid('tenant0'))
-
-        num_assoc_after = self.count('wim_nfvo_tenants')
-
-        # and the number of association record should not be increased
-        self.assertEqual(num_assoc_before, num_assoc_after)
-
-    def test_create_wim__tenant_doesnt_exist(self):
-        # Given a tenant not exists
-        self.populate()
-
-        # But the user tries to create a wim_account anyway
-        response = self.app.post_json(
-            '/{}/wims/{}'.format(uuid('tenant999'), uuid('wim0')), {
-                'wim_account': {
-                    'user': 'user999',
-                    'password': 'password999'}},
-            expect_errors=True)
-
-        # then the request should not be well succeeded
-        self.assertEqual(response.status_code, Not_Found)
-        # the message should be useful
-        response.mustcontain('No record was found', uuid('tenant999'))
-
-    def test_create_wim__wim_doesnt_exist(self):
-        # Given a tenant not exists
-        self.populate()
-
-        # But the user tries to create a wim_account anyway
-        response = self.app.post_json(
-            '/{}/wims/{}'.format(uuid('tenant0'), uuid('wim999')), {
-                'wim_account': {
-                    'user': 'user999',
-                    'password': 'password999'}},
-            expect_errors=True)
-
-        # then the request should not be well succeeded
-        self.assertEqual(response.status_code, Not_Found)
-        # the message should be useful
-        response.mustcontain('No record was found', uuid('wim999'))
-
-    def test_update_wim_account(self):
-        # Given a WIM account connecting a tenant and a WIM exists
-        self.populate()
-
-        with self.engine.threads_running():
-            num_threads = len(self.engine.threads)
-
-            thread = self.engine.threads[uuid('wim-account00')]
-            reload = MagicMock(wraps=thread.reload)
-
-            with patch.object(thread, 'reload', reload):
-                # when a PUT /<tenant_id>/wims/<wim_id> arrives
-                response = self.app.put_json(
-                    '/{}/wims/{}'.format(uuid('tenant0'), uuid('wim0')), {
-                        'wim_account': {
-                            'name': 'account888',
-                            'user': 'user888'}})
-
-            num_threads_after = len(self.engine.threads)
-
-        # then the wim thread should be restarted
-        reload.assert_called_once()
-        # and no thread should be added or removed
-        self.assertEqual(num_threads_after, num_threads)
-
-        # and the request should be well succeeded
-        self.assertEqual(response.status_code, OK)
-        self.assertEqual(response.json['wim_account']['name'], 'account888')
-        self.assertEqual(response.json['wim_account']['user'], 'user888')
-
-    def test_update_wim_account__multiple(self):
-        # Given a WIM account connected to several tenants
-        self.populate()
-
-        with self.engine.threads_running():
-            # when a PUT /any/wims/<wim_id> arrives
-            response = self.app.put_json(
-                '/any/wims/{}'.format(uuid('wim0')), {
-                    'wim_account': {
-                        'user': 'user888',
-                        'config': {'x': 888}}})
-
-        # then the request should be well succeeded
-        self.assertEqual(response.status_code, OK)
-        self.assertEqual(len(response.json['wim_accounts']), eg.NUM_TENANTS)
-
-        for account in response.json['wim_accounts']:
-            self.assertEqual(account['user'], 'user888')
-            self.assertEqual(account['config']['x'], 888)
-
-    def test_delete_wim_account(self):
-        # Given a WIM account exists and it is connected to a tenant
-        self.populate()
-
-        num_accounts_before = self.count('wim_accounts')
-
-        with self.engine.threads_running():
-            thread = self.engine.threads[uuid('wim-account00')]
-            exit = MagicMock(wraps=thread.exit)
-            num_threads = len(self.engine.threads)
-
-            with patch.object(thread, 'exit', exit):
-                # when a PUT /<tenant_id>/wims/<wim_id> arrives
-                response = self.app.delete_json(
-                    '/{}/wims/{}'.format(uuid('tenant0'), uuid('wim0')))
-
-            num_threads_after = len(self.engine.threads)
-
-        # then the wim thread should exit
-        self.assertEqual(num_threads_after, num_threads - 1)
-        exit.assert_called_once()
-
-        # and the request should be well succeeded
-        self.assertEqual(response.status_code, OK)
-        response.mustcontain('account `wim-account00` deleted')
-
-        # and the number of wim_accounts should decrease
-        num_accounts_after = self.count('wim_accounts')
-        self.assertEqual(num_accounts_after, num_accounts_before - 1)
-
-    def test_delete_wim_account__multiple(self):
-        # Given a WIM account exists and it is connected to several tenants
-        self.populate()
-
-        num_accounts_before = self.count('wim_accounts')
-
-        with self.engine.threads_running():
-            # when a PUT /<tenant_id>/wims/<wim_id> arrives
-            response = self.app.delete_json(
-                '/any/wims/{}'.format(uuid('wim0')))
-
-        # then the request should be well succeeded
-        self.assertEqual(response.status_code, OK)
-        response.mustcontain('account `wim-account00` deleted')
-        response.mustcontain('account `wim-account10` deleted')
-
-        # and the number of wim_accounts should decrease
-        num_accounts_after = self.count('wim_accounts')
-        self.assertEqual(num_accounts_after,
-                         num_accounts_before - eg.NUM_TENANTS)
-
-    def test_delete_wim_account__doesnt_exist(self):
-        # Given we have a tenant that is not connected to a WIM
-        self.populate()
-        tenant = {'uuid': uuid('tenant888'), 'name': 'tenant888'}
-        self.populate([{'nfvo_tenants': [tenant]}])
-
-        num_accounts_before = self.count('wim_accounts')
-
-        # when a PUT /<tenant_id>/wims/<wim_id> arrives
-        response = self.app.delete(
-            '/{}/wims/{}'.format(uuid('tenant888'), uuid('wim0')),
-            expect_errors=True)
-
-        # then the request should not succeed
-        self.assertEqual(response.status_code, Not_Found)
-
-        # and the number of wim_accounts should not decrease
-        num_accounts_after = self.count('wim_accounts')
-        self.assertEqual(num_accounts_after, num_accounts_before)
-
-    def test_create_port_mappings(self):
-        # Given we have a wim and datacenter without any port mappings
-        self.populate([{'nfvo_tenants': eg.tenant(0)}] +
-                      eg.datacenter_set(888, 0) +
-                      eg.wim_set(999, 0))
-
-        # when a POST /<tenant_id>/wims/<wim_id>/port_mapping arrives
-        response = self.app.post_json(
-            '/{}/wims/{}/port_mapping'.format(uuid('tenant0'), uuid('wim999')),
-            {'wim_port_mapping': [{
-                'datacenter_name': 'dc888',
-                'pop_wan_mappings': [
-                    {'pop_switch_dpid': 'AA:AA:AA:AA:AA:AA:AA:AA',
-                     'pop_switch_port': 1,
-                     'wan_service_mapping_info': {
-                         'mapping_type': 'dpid-port',
-                         'wan_switch_dpid': 'BB:BB:BB:BB:BB:BB:BB:BB',
-                         'wan_switch_port': 1
-                     }}
-                ]}
-            ]})
-
-        # the request should be well succeeded
-        self.assertEqual(response.status_code, OK)
-        # and port mappings should be stored in the database
-        port_mapping = self.db.get_rows(FROM='wim_port_mappings')
-        self.assertEqual(len(port_mapping), 1)
-
-    def test_get_port_mappings(self):
-        # Given WIMS and datacenters exist with port mappings between them
-        self.populate()
-        # when a GET /<tenant_id>/wims/<wim_id>/port_mapping arrives
-        response = self.app.get(
-            '/{}/wims/{}/port_mapping'.format(uuid('tenant0'), uuid('wim0')))
-        # the request should be well succeeded
-        self.assertEqual(response.status_code, OK)
-        # and we should see port mappings for each WIM, datacenter pair
-        mappings = response.json['wim_port_mapping']
-        self.assertEqual(len(mappings), eg.NUM_DATACENTERS)
-        # ^  In the fixture set all the datacenters are connected to all wims
-
-    def test_delete_port_mappings(self):
-        # Given WIMS and datacenters exist with port mappings between them
-        self.populate()
-        num_mappings_before = self.count('wim_port_mappings')
-
-        # when a DELETE /<tenant_id>/wims/<wim_id>/port_mapping arrives
-        response = self.app.delete(
-            '/{}/wims/{}/port_mapping'.format(uuid('tenant0'), uuid('wim0')))
-        # the request should be well succeeded
-        self.assertEqual(response.status_code, OK)
-        # and the number of port mappings should decrease
-        num_mappings_after = self.count('wim_port_mappings')
-        self.assertEqual(num_mappings_after,
-                         num_mappings_before - eg.NUM_DATACENTERS)
-        # ^  In the fixture set all the datacenters are connected to all wims
-
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/osm_ro/wim/tests/test_persistence.py b/osm_ro/wim/tests/test_persistence.py
deleted file mode 100644 (file)
index e3e6cf6..0000000
+++ /dev/null
@@ -1,265 +0,0 @@
-# -*- coding: utf-8 -*-
-##
-# Copyright 2018 University of Bristol - High Performance Networks Research
-# Group
-# All Rights Reserved.
-#
-# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
-# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: <highperformance-networks@bristol.ac.uk>
-#
-# Neither the name of the University of Bristol nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# This work has been performed in the context of DCMS UK 5G Testbeds
-# & Trials Programme and in the framework of the Metro-Haul project -
-# funded by the European Commission under Grant number 761727 through the
-# Horizon 2020 and 5G-PPP programmes.
-##
-
-from __future__ import unicode_literals
-
-import unittest
-from itertools import chain
-from types import StringType
-
-from six.moves import range
-
-from . import fixtures as eg
-from ...tests.db_helpers import (
-    TestCaseWithDatabasePerTest,
-    disable_foreign_keys,
-    uuid
-)
-from ..persistence import (
-    WimPersistence,
-    hide_confidential_fields,
-    serialize_fields,
-    unserialize_fields
-)
-
-
-class TestPersistenceUtils(unittest.TestCase):
-    def test_hide_confidential_fields(self):
-        example = {
-            'password': '123456',
-            'nested.password': '123456',
-            'nested.secret': None,
-        }
-        result = hide_confidential_fields(example,
-                                          fields=('password', 'secret'))
-        for field in 'password', 'nested.password':
-            assert result[field].startswith('***')
-        self.assertIs(result['nested.secret'], None)
-
-    def test_serialize_fields(self):
-        example = {
-            'config': dict(x=1),
-            'nested.info': [1, 2, 3],
-            'nested.config': None
-        }
-        result = serialize_fields(example, fields=('config', 'info'))
-        for field in 'config', 'nested.info':
-            self.assertIsInstance(result[field], StringType)
-        self.assertIs(result['nested.config'], None)
-
-    def test_unserialize_fields(self):
-        example = {
-            'config': '{"x": 1}',
-            'nested.info': '[1,2,3]',
-            'nested.config': None,
-            'confidential.info': '{"password": "abcdef"}'
-        }
-        result = unserialize_fields(example, fields=('config', 'info'))
-        self.assertEqual(result['config'], dict(x=1))
-        self.assertEqual(result['nested.info'], [1, 2, 3])
-        self.assertIs(result['nested.config'], None)
-        self.assertNotEqual(result['confidential.info']['password'], 'abcdef')
-        assert result['confidential.info']['password'].startswith('***')
-
-
-class TestWimPersistence(TestCaseWithDatabasePerTest):
-    def setUp(self):
-        super(TestWimPersistence, self).setUp()
-        self.persist = WimPersistence(self.db)
-
-    def populate(self, seeds=None):
-        super(TestWimPersistence, self).populate(seeds or eg.consistent_set())
-
-    def test_query_offset(self):
-        # Given a database contains 4 records
-        self.populate([{'wims': [eg.wim(i) for i in range(4)]}])
-
-        # When we query using a limit of 2 and a offset of 1
-        results = self.persist.query('wims',
-                                     ORDER_BY='name', LIMIT=2, OFFSET=1)
-        # Then we should have 2 results, skipping the first record
-        names = [r['name'] for r in results]
-        self.assertItemsEqual(names, ['wim1', 'wim2'])
-
-    def test_get_wim_account_by_wim_tenant(self):
-        # Given a database contains WIM accounts associated to Tenants
-        self.populate()
-
-        # when we retrieve the account using wim and tenant
-        wim_account = self.persist.get_wim_account_by(
-            uuid('wim0'), uuid('tenant0'))
-
-        # then the right record should be returned
-        self.assertEqual(wim_account['uuid'], uuid('wim-account00'))
-        self.assertEqual(wim_account['name'], 'wim-account00')
-        self.assertEqual(wim_account['user'], 'user00')
-
-    def test_get_wim_account_by_wim_tenant__names(self):
-        # Given a database contains WIM accounts associated to Tenants
-        self.populate()
-
-        # when we retrieve the account using wim and tenant
-        wim_account = self.persist.get_wim_account_by(
-            'wim0', 'tenant0')
-
-        # then the right record should be returned
-        self.assertEqual(wim_account['uuid'], uuid('wim-account00'))
-        self.assertEqual(wim_account['name'], 'wim-account00')
-        self.assertEqual(wim_account['user'], 'user00')
-
-    def test_get_wim_accounts_by_wim(self):
-        # Given a database contains WIM accounts associated to Tenants
-        self.populate()
-
-        # when we retrieve the accounts using wim
-        wim_accounts = self.persist.get_wim_accounts_by(uuid('wim0'))
-
-        # then the right records should be returned
-        self.assertEqual(len(wim_accounts), eg.NUM_TENANTS)
-        for account in wim_accounts:
-            self.assertEqual(account['wim_id'], uuid('wim0'))
-
-    def test_get_wim_port_mappings(self):
-        # Given a database with WIMs, datacenters and port-mappings
-        self.populate()
-
-        # when we retrieve the port mappings for a list of datacenters
-        # using either names or uuids
-        for criteria in ([uuid('dc0'), uuid('dc1')], ['dc0', 'dc1']):
-            mappings = self.persist.get_wim_port_mappings(datacenter=criteria)
-
-            # then each result should have a datacenter_id
-            datacenters = [m['datacenter_id'] for m in mappings]
-            for datacenter in datacenters:
-                self.assertIn(datacenter, [uuid('dc0'), uuid('dc1')])
-
-            # a wim_id
-            wims = [m['wim_id'] for m in mappings]
-            for wim in wims:
-                self.assertIsNot(wim, None)
-
-            # and a array of pairs 'wan' <> 'pop' connections
-            pairs = chain(*(m['pop_wan_mappings'] for m in mappings))
-            self.assertEqual(len(list(pairs)), 2 * eg.NUM_WIMS)
-
-    def test_get_wim_port_mappings_multiple(self):
-        # Given we have more then one connection in a datacenter managed by the
-        # WIM
-        self.populate()
-        self.populate([{
-            'wim_port_mappings': [
-                eg.wim_port_mapping(
-                    0, 0,
-                    pop_dpid='CC:CC:CC:CC:CC:CC:CC:CC',
-                    wan_dpid='DD:DD:DD:DD:DD:DD:DD:DD'),
-                eg.wim_port_mapping(
-                    0, 0,
-                    pop_dpid='EE:EE:EE:EE:EE:EE:EE:EE',
-                    wan_dpid='FF:FF:FF:FF:FF:FF:FF:FF')]}])
-
-        # when we retrieve the port mappings for the wim and datacenter:
-        mappings = (
-            self.persist.get_wim_port_mappings(wim='wim0', datacenter='dc0'))
-
-        # then it should return just a single result, grouped by wim and
-        # datacenter
-        self.assertEqual(len(mappings), 1)
-        self.assertEqual(mappings[0]['wim_id'], uuid('wim0'))
-        self.assertEqual(mappings[0]['datacenter_id'], uuid('dc0'))
-
-        self.assertEqual(len(mappings[0]['pop_wan_mappings']), 3)
-
-        # when we retreive the mappings for more then one wim/datacenter
-        # the grouping should still work properly
-        mappings = self.persist.get_wim_port_mappings(
-            wim=['wim0', 'wim1'], datacenter=['dc0', 'dc1'])
-        self.assertEqual(len(mappings), 4)
-        pairs = chain(*(m['pop_wan_mappings'] for m in mappings))
-        self.assertEqual(len(list(pairs)), 6)
-
-    def test_get_actions_in_group(self):
-        # Given a good number of wim actions exist in the database
-        kwargs = {'action_id': uuid('action0')}
-        actions = (eg.wim_actions('CREATE', num_links=8, **kwargs) +
-                   eg.wim_actions('FIND', num_links=8, **kwargs) +
-                   eg.wim_actions('START', num_links=8, **kwargs))
-        for i, action in enumerate(actions):
-            action['task_index'] = i
-
-        self.populate([
-            {'nfvo_tenants': eg.tenant()}
-        ] + eg.wim_set() + [
-            {'instance_actions': eg.instance_action(**kwargs)},
-            {'vim_wim_actions': actions}
-        ])
-
-        # When we retrieve them in groups
-        limit = 5
-        results = self.persist.get_actions_in_groups(
-            uuid('wim-account00'), ['instance_wim_nets'], group_limit=limit)
-
-        # Then we should have N groups where N == limit
-        self.assertEqual(len(results), limit)
-        for _, task_list in results:
-            # And since for each link we have create 3 actions (create, find,
-            # start), we should find them in each group
-            self.assertEqual(len(task_list), 3)
-
-    @disable_foreign_keys
-    def test_update_instance_action_counters(self):
-        # Given we have one instance action in the database with 2 incomplete
-        # tasks
-        action = eg.instance_action(num_tasks=2)
-        self.populate([{'instance_actions': action}])
-        # When we update the done counter by 0, nothing should happen
-        self.persist.update_instance_action_counters(action['uuid'], done=0)
-        result = self.persist.get_by_uuid('instance_actions', action['uuid'])
-        self.assertEqual(result['number_done'], 0)
-        self.assertEqual(result['number_failed'], 0)
-        # When we update the done counter by 2, number_done should be 2
-        self.persist.update_instance_action_counters(action['uuid'], done=2)
-        result = self.persist.get_by_uuid('instance_actions', action['uuid'])
-        self.assertEqual(result['number_done'], 2)
-        self.assertEqual(result['number_failed'], 0)
-        # When we update the done counter by -1, and the failed counter by 1
-        self.persist.update_instance_action_counters(
-            action['uuid'], done=-1, failed=1)
-        # Then we should see 1 and 1
-        result = self.persist.get_by_uuid('instance_actions', action['uuid'])
-        self.assertEqual(result['number_done'], 1)
-        self.assertEqual(result['number_failed'], 1)
-
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/osm_ro/wim/tests/test_wim_thread.py b/osm_ro/wim/tests/test_wim_thread.py
deleted file mode 100644 (file)
index 6d61848..0000000
+++ /dev/null
@@ -1,332 +0,0 @@
-# -*- coding: utf-8 -*-
-##
-# Copyright 2018 University of Bristol - High Performance Networks Research
-# Group
-# All Rights Reserved.
-#
-# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
-# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: <highperformance-networks@bristol.ac.uk>
-#
-# Neither the name of the University of Bristol nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# This work has been performed in the context of DCMS UK 5G Testbeds
-# & Trials Programme and in the framework of the Metro-Haul project -
-# funded by the European Commission under Grant number 761727 through the
-# Horizon 2020 and 5G-PPP programmes.
-##
-
-from __future__ import unicode_literals, print_function
-
-import unittest
-from difflib import unified_diff
-from operator import itemgetter
-from time import time
-
-import json
-
-from mock import MagicMock, patch
-
-from . import fixtures as eg
-from ...tests.db_helpers import (
-    TestCaseWithDatabasePerTest,
-    disable_foreign_keys,
-    uuid
-)
-from ..engine import WimEngine
-from ..persistence import WimPersistence
-from ..wim_thread import WimThread
-
-
-ignore_connector = patch('osm_ro.wim.wim_thread.CONNECTORS', MagicMock())
-
-
-def _repr(value):
-    return json.dumps(value, indent=4, sort_keys=True)
-
-
-@ignore_connector
-class TestWimThreadWithDb(TestCaseWithDatabasePerTest):
-    def setUp(self):
-        super(TestWimThreadWithDb, self).setUp()
-        self.persist = WimPersistence(self.db)
-        wim = eg.wim(0)
-        account = eg.wim_account(0, 0)
-        account['wim'] = wim
-        self.thread = WimThread(self.persist, account)
-        self.thread.connector = MagicMock()
-
-    def assertTasksEqual(self, left, right):
-        fields = itemgetter('item', 'item_id', 'action', 'status')
-        left_ = (t.as_dict() for t in left)
-        left_ = [fields(t) for t in left_]
-        right_ = [fields(t) for t in right]
-
-        try:
-            self.assertItemsEqual(left_, right_)
-        except AssertionError:
-            print('left', _repr(left))
-            print('left', len(left_), 'items')
-            print('right', len(right_), 'items')
-            result = list(unified_diff(_repr(sorted(left_)).split('\n'),
-                                       _repr(sorted(right_)).split('\n'),
-                                       'left', 'right'))
-            print('diff:\n', '\n'.join(result))
-            raise
-
-    def test_reload_actions__all_create(self):
-        # Given we have 3 CREATE actions stored in the database
-        actions = eg.wim_actions('CREATE',
-                                 action_id=uuid('action0'), num_links=3)
-        self.populate([
-            {'nfvo_tenants': eg.tenant()}
-        ] + eg.wim_set() + [
-            {'instance_actions':
-                eg.instance_action(action_id=uuid('action0'))},
-            {'vim_wim_actions': actions}
-        ])
-
-        # When we reload the tasks
-        self.thread.reload_actions()
-        # All of them should be inserted as pending
-        self.assertTasksEqual(self.thread.pending_tasks, actions)
-
-    def test_reload_actions__all_refresh(self):
-        # Given just DONE tasks are in the database
-        actions = eg.wim_actions(status='DONE',
-                                 action_id=uuid('action0'), num_links=3)
-        self.populate([
-            {'nfvo_tenants': eg.tenant()}
-        ] + eg.wim_set() + [
-            {'instance_actions':
-                eg.instance_action(action_id=uuid('action0'))},
-            {'vim_wim_actions': actions}
-        ])
-
-        # When we reload the tasks
-        self.thread.reload_actions()
-        # All of them should be inserted as refresh
-        self.assertTasksEqual(self.thread.refresh_tasks, actions)
-
-    def test_reload_actions__grouped(self):
-        # Given we have 2 tasks for the same item in the database
-        kwargs = {'action_id': uuid('action0')}
-        actions = (eg.wim_actions('CREATE', **kwargs) +
-                   eg.wim_actions('FIND', **kwargs))
-        for i, action in enumerate(actions):
-            action['task_index'] = i
-
-        self.populate([
-            {'nfvo_tenants': eg.tenant()}
-        ] + eg.wim_set() + [
-            {'instance_actions': eg.instance_action(**kwargs)},
-            {'vim_wim_actions': actions}
-        ])
-
-        # When we reload the tasks
-        self.thread.reload_actions()
-        # Just one group should be created
-        self.assertEqual(len(self.thread.grouped_tasks.values()), 1)
-
-    def test_reload_actions__delete_scheduled(self):
-        # Given we have 3 tasks for the same item in the database, but one of
-        # them is a DELETE task and it is SCHEDULED
-        kwargs = {'action_id': uuid('action0')}
-        actions = (eg.wim_actions('CREATE', **kwargs) +
-                   eg.wim_actions('FIND', **kwargs) +
-                   eg.wim_actions('DELETE', status='SCHEDULED', **kwargs))
-        for i, action in enumerate(actions):
-            action['task_index'] = i
-
-        self.populate([
-            {'nfvo_tenants': eg.tenant()}
-        ] + eg.wim_set() + [
-            {'instance_actions': eg.instance_action(**kwargs)},
-            {'vim_wim_actions': actions}
-        ])
-
-        # When we reload the tasks
-        self.thread.reload_actions()
-        # Just one group should be created
-        self.assertEqual(len(self.thread.grouped_tasks.values()), 1)
-
-    def test_reload_actions__delete_done(self):
-        # Given we have 3 tasks for the same item in the database, but one of
-        # them is a DELETE task and it is not SCHEDULED
-        kwargs = {'action_id': uuid('action0')}
-        actions = (eg.wim_actions('CREATE', **kwargs) +
-                   eg.wim_actions('FIND', **kwargs) +
-                   eg.wim_actions('DELETE', status='DONE', **kwargs))
-        for i, action in enumerate(actions):
-            action['task_index'] = i
-
-        self.populate([
-            {'nfvo_tenants': eg.tenant()}
-        ] + eg.wim_set() + [
-            {'instance_actions': eg.instance_action(**kwargs)},
-            {'vim_wim_actions': actions}
-        ])
-
-        # When we reload the tasks
-        self.thread.reload_actions()
-        # No pending task should be found
-        self.assertEqual(self.thread.pending_tasks, [])
-
-    def test_reload_actions__batch(self):
-        # Given the group_limit is 10, and we have 24
-        group_limit = 10
-        kwargs = {'action_id': uuid('action0')}
-        actions = (eg.wim_actions('CREATE', num_links=8, **kwargs) +
-                   eg.wim_actions('FIND', num_links=8, **kwargs) +
-                   eg.wim_actions('FIND', num_links=8, **kwargs))
-        for i, action in enumerate(actions):
-            action['task_index'] = i
-
-        self.populate([
-            {'nfvo_tenants': eg.tenant()}
-        ] + eg.wim_set() + [
-            {'instance_actions': eg.instance_action(**kwargs)},
-            {'vim_wim_actions': actions}
-        ])
-
-        # When we reload the tasks
-        self.thread.reload_actions(group_limit)
-
-        # Then we should still see the actions in memory properly
-        self.assertTasksEqual(self.thread.pending_tasks, actions)
-        self.assertEqual(len(self.thread.grouped_tasks.values()), 8)
-
-    @disable_foreign_keys
-    def test_process_list__refresh(self):
-        update_wan_link = MagicMock(wrap=self.persist.update_wan_link)
-        update_action = MagicMock(wrap=self.persist.update_wan_link)
-        patches = dict(update_wan_link=update_wan_link,
-                       update_action=update_action)
-
-        with patch.multiple(self.persist, **patches):
-            # Given we have 2 tasks in the refresh queue
-            kwargs = {'action_id': uuid('action0')}
-            actions = (eg.wim_actions('FIND', 'DONE', **kwargs) +
-                       eg.wim_actions('CREATE', 'BUILD', **kwargs))
-            for i, action in enumerate(actions):
-                action['task_index'] = i
-
-            self.populate(
-                [{'instance_wim_nets': eg.instance_wim_nets()}] +
-                [{'instance_actions':
-                    eg.instance_action(num_tasks=2, **kwargs)}] +
-                [{'vim_wim_actions': actions}])
-
-            self.thread.insert_pending_tasks(actions)
-
-            # When we process the refresh list
-            processed = self.thread.process_list('refresh')
-
-            # Then we should have 2 updates
-            self.assertEqual(processed, 2)
-
-            # And the database should be updated accordingly
-            self.assertEqual(update_wan_link.call_count, 2)
-            self.assertEqual(update_action.call_count, 2)
-
-    @disable_foreign_keys
-    def test_delete_superseed_create(self):
-        # Given we insert a scheduled CREATE task
-        instance_action = eg.instance_action(num_tasks=1)
-        self.thread.pending_tasks = []
-        engine = WimEngine(persistence=self.persist)
-        self.addCleanup(engine.stop_threads)
-        wan_links = eg.instance_wim_nets()
-        create_actions = engine.create_actions(wan_links)
-        delete_actions = engine.delete_actions(wan_links)
-        engine.incorporate_actions(create_actions + delete_actions,
-                                   instance_action)
-
-        self.populate(instance_actions=instance_action,
-                      vim_wim_actions=create_actions + delete_actions)
-
-        self.thread.insert_pending_tasks(create_actions)
-
-        assert self.thread.pending_tasks[0].is_scheduled
-
-        # When we insert the equivalent DELETE task
-        self.thread.insert_pending_tasks(delete_actions)
-
-        # Then the CREATE task should be superseded
-        self.assertEqual(self.thread.pending_tasks[0].action, 'CREATE')
-        assert self.thread.pending_tasks[0].is_superseded
-
-        self.thread.process_list('pending')
-        self.thread.process_list('refresh')
-        self.assertFalse(self.thread.pending_tasks)
-
-
-@ignore_connector
-class TestWimThread(unittest.TestCase):
-    def setUp(self):
-        wim = eg.wim(0)
-        account = eg.wim_account(0, 0)
-        account['wim'] = wim
-        self.persist = MagicMock()
-        self.thread = WimThread(self.persist, account)
-        self.thread.connector = MagicMock()
-
-        super(TestWimThread, self).setUp()
-
-    def test_process_refresh(self):
-        # Given we have 30 tasks in the refresh queue
-        kwargs = {'action_id': uuid('action0')}
-        actions = eg.wim_actions('FIND', 'DONE', num_links=30, **kwargs)
-        self.thread.insert_pending_tasks(actions)
-
-        # When we process the refresh list
-        processed = self.thread.process_list('refresh')
-
-        # Then we should have REFRESH_BATCH updates
-        self.assertEqual(processed, self.thread.BATCH)
-
-    def test_process_refresh__with_superseded(self):
-        # Given we have 30 tasks but 15 of them are superseded
-        kwargs = {'action_id': uuid('action0')}
-        actions = eg.wim_actions('FIND', 'DONE', num_links=30, **kwargs)
-        self.thread.insert_pending_tasks(actions)
-        for task in self.thread.refresh_tasks[0:30:2]:
-            task.status = 'SUPERSEDED'
-
-        now = time()
-
-        # When we call the refresh_elements
-        processed = self.thread.process_list('refresh')
-
-        # Then we should have 25 updates (since SUPERSEDED updates are cheap,
-        # they are not counted for the limits)
-        self.assertEqual(processed, 25)
-
-        # The SUPERSEDED tasks should be removed, 5 tasks should be untouched,
-        # and 10 tasks should be rescheduled
-        refresh_tasks = self.thread.refresh_tasks
-        old = [t for t in refresh_tasks if t.process_at <= now]
-        new = [t for t in refresh_tasks if t.process_at > now]
-        self.assertEqual(len(old), 5)
-        self.assertEqual(len(new), 10)
-        self.assertEqual(len(self.thread.refresh_tasks), 15)
-
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/osm_ro/wim/tox.ini b/osm_ro/wim/tox.ini
deleted file mode 100644 (file)
index 29f1a8f..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-# This tox file allows the devs to run unit tests only for this subpackage.
-# In order to do so, cd into the directory and run `tox`
-
-[tox]
-minversion = 1.8
-envlist = py27,flake8,radon
-skipsdist = True
-
-[testenv]
-passenv = *_DB_*
-setenv =
-    PATH = {env:PATH}:{toxinidir}/../../database_utils
-    DBUTILS = {toxinidir}/../../database_utils
-changedir = {toxinidir}
-commands =
-    nosetests -v -d {posargs:tests}
-deps =
-    WebTest
-    logging
-    bottle
-    coverage
-    jsonschema
-    mock
-    mysqlclient
-    nose
-    six
-    PyYaml
-    paramiko
-    ipdb
-    requests
-
-[testenv:flake8]
-changedir = {toxinidir}
-deps = flake8
-commands = flake8 {posargs:.}
-
-[testenv:radon]
-changedir = {toxinidir}
-deps = radon
-commands =
-    radon cc --show-complexity --total-average {posargs:.}
-    radon mi -s {posargs:.}
-
-[coverage:run]
-branch = True
-source = {toxinidir}
-omit =
-    tests
-    tests/*
-    */test_*
-    .tox/*
-
-[coverage:report]
-show_missing = True
-
-[flake8]
-exclude =
-    .tox
diff --git a/osm_ro/wim/wan_link_actions.py b/osm_ro/wim/wan_link_actions.py
deleted file mode 100644 (file)
index 034e415..0000000
+++ /dev/null
@@ -1,442 +0,0 @@
-# -*- coding: utf-8 -*-
-##
-# Copyright 2018 University of Bristol - High Performance Networks Research
-# Group
-# All Rights Reserved.
-#
-# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
-# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: <highperformance-networks@bristol.ac.uk>
-#
-# Neither the name of the University of Bristol nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# This work has been performed in the context of DCMS UK 5G Testbeds
-# & Trials Programme and in the framework of the Metro-Haul project -
-# funded by the European Commission under Grant number 761727 through the
-# Horizon 2020 and 5G-PPP programmes.
-##
-# pylint: disable=E1101,E0203,W0201
-import json
-from pprint import pformat
-from sys import exc_info
-from time import time
-
-from six import reraise
-
-from ..utils import filter_dict_keys as filter_keys
-from ..utils import merge_dicts, remove_none_items, safe_get, truncate
-from .actions import CreateAction, DeleteAction, FindAction
-from .errors import (
-    InconsistentState,
-    NoRecordFound,
-    NoExternalPortFound
-)
-from wimconn import WimConnectorError
-
-INSTANCE_NET_STATUS_ERROR = ('DOWN', 'ERROR', 'VIM_ERROR',
-                             'DELETED', 'SCHEDULED_DELETION')
-INSTANCE_NET_STATUS_PENDING = ('BUILD', 'INACTIVE', 'SCHEDULED_CREATION')
-INSTANCE_VM_STATUS_ERROR = ('ERROR', 'VIM_ERROR',
-                            'DELETED', 'SCHEDULED_DELETION')
-
-
-class RefreshMixin(object):
-    def refresh(self, connector, persistence):
-        """Ask the external WAN Infrastructure Manager system for updates on
-        the status of the task.
-
-        Arguments:
-            connector: object with API for accessing the WAN
-                Infrastructure Manager system
-            persistence: abstraction layer for the database
-        """
-        fields = ('wim_status', 'wim_info', 'error_msg')
-        result = dict.fromkeys(fields)
-
-        try:
-            result.update(
-                connector
-                .get_connectivity_service_status(self.wim_internal_id))
-        except WimConnectorError as ex:
-            self.logger.exception(ex)
-            result.update(wim_status='WIM_ERROR', error_msg=truncate(ex))
-
-        result = filter_keys(result, fields)
-
-        action_changes = remove_none_items({
-            'extra': merge_dicts(self.extra, result),
-            'status': 'BUILD' if result['wim_status'] == 'BUILD' else None,
-            'error_msg': result['error_msg'],
-            'modified_at': time()})
-        link_changes = merge_dicts(result, status=result.pop('wim_status'))
-        # ^  Rename field: wim_status => status
-
-        persistence.update_wan_link(self.item_id,
-                                    remove_none_items(link_changes))
-
-        self.save(persistence, **action_changes)
-
-        return result
-
-
-class WanLinkCreate(RefreshMixin, CreateAction):
-    def fail(self, persistence, reason, status='FAILED'):
-        changes = {'status': 'ERROR', 'error_msg': truncate(reason)}
-        persistence.update_wan_link(self.item_id, changes)
-        return super(WanLinkCreate, self).fail(persistence, reason, status)
-
-    def process(self, connector, persistence, ovim):
-        """Process the current task.
-        First we check if all the dependencies are ready,
-        then we call ``execute`` to actually execute the action.
-
-        Arguments:
-            connector: object with API for accessing the WAN
-                Infrastructure Manager system
-            persistence: abstraction layer for the database
-            ovim: instance of openvim, abstraction layer that enable
-                SDN-related operations
-        """
-        wan_link = persistence.get_by_uuid('instance_wim_nets', self.item_id)
-
-        # First we check if all the dependencies are solved
-        instance_nets = persistence.get_instance_nets(
-            wan_link['instance_scenario_id'], wan_link['sce_net_id'])
-
-        try:
-            dependency_statuses = [n['status'] for n in instance_nets]
-        except KeyError:
-            self.logger.debug('`status` not found in\n\n%s\n\n',
-                              json.dumps(instance_nets, indent=4))
-        errored = [instance_nets[i]
-                   for i, status in enumerate(dependency_statuses)
-                   if status in INSTANCE_NET_STATUS_ERROR]
-        if errored:
-            return self.fail(
-                persistence,
-                'Impossible to stablish WAN connectivity due to an issue '
-                'with the local networks:\n\t' +
-                '\n\t'.join('{uuid}: {status}'.format(**n) for n in errored))
-
-        pending = [instance_nets[i]
-                   for i, status in enumerate(dependency_statuses)
-                   if status in INSTANCE_NET_STATUS_PENDING]
-        if pending:
-            return self.defer(
-                persistence,
-                'Still waiting for the local networks to be active:\n\t' +
-                '\n\t'.join('{uuid}: {status}'.format(**n) for n in pending))
-
-        return self.execute(connector, persistence, ovim, instance_nets)
-
-    def _get_connection_point_info(self, persistence, ovim, instance_net):
-        """Retrieve information about the connection PoP <> WAN
-
-        Arguments:
-            persistence: object that encapsulates persistence logic
-                (e.g. db connection)
-            ovim: object that encapsulates network management logic (openvim)
-            instance_net: record with the information about a local network
-                (inside a VIM). This network will be connected via a WAN link
-                to a different network in a distinct VIM.
-                This method is used to trace what would be the way this network
-                can be accessed from the outside world.
-
-        Returns:
-            dict: Record representing the wan_port_mapping associated to the
-                  given instance_net. The expected fields are:
-                  **wim_id**, **datacenter_id**, **pop_switch_dpid** (the local
-                  network is expected to be connected at this switch),
-                  **pop_switch_port**, **wan_service_endpoint_id**,
-                  **wan_service_mapping_info**.
-        """
-        # First, we need to find a route from the datacenter to the outside
-        # world. For that, we can use the rules given in the datacenter
-        # configuration:
-        datacenter_id = instance_net['datacenter_id']
-        datacenter = persistence.get_datacenter_by(datacenter_id)
-        rules = safe_get(datacenter, 'config.external_connections', {}) or {}
-        vim_info = instance_net.get('vim_info', {}) or {}
-        # Alternatively, we can look for it, using the SDN assist
-        external_port = (self._evaluate_rules(rules, vim_info) or
-                         self._get_port_sdn(ovim, instance_net))
-
-        if not external_port:
-            raise NoExternalPortFound(instance_net)
-
-        # Then, we find the WAN switch that is connected to this external port
-        try:
-            wim_account = persistence.get_wim_account_by(
-                uuid=self.wim_account_id)
-
-            criteria = {
-                'wim_id': wim_account['wim_id'],
-                'pop_switch_dpid': external_port[0],
-                'pop_switch_port': external_port[1],
-                'datacenter_id': datacenter_id}
-
-            wan_port_mapping = persistence.query_one(
-                FROM='wim_port_mappings',
-                WHERE=criteria)
-        except NoRecordFound:
-            ex = InconsistentState('No WIM port mapping found:'
-                                   'wim_account: {}\ncriteria:\n{}'.format(
-                                       self.wim_account_id, pformat(criteria)))
-            reraise(ex.__class__, ex, exc_info()[2])
-
-        # It is important to return encapsulation information if present
-        mapping = merge_dicts(
-            wan_port_mapping.get('wan_service_mapping_info'),
-            filter_keys(vim_info, ('encapsulation_type', 'encapsulation_id'))
-        )
-
-        return merge_dicts(wan_port_mapping, wan_service_mapping_info=mapping)
-
-    def _get_port_sdn(self, ovim, instance_net):
-        criteria = {'net_id': instance_net['sdn_net_id']}
-        try:
-            local_port_mapping = ovim.get_ports(filter=criteria)
-
-            if local_port_mapping:
-                return (local_port_mapping[0]['switch_dpid'],
-                        local_port_mapping[0]['switch_port'])
-        except:  # noqa
-            self.logger.exception('Problems when calling OpenVIM')
-
-        self.logger.debug('No ports found using criteria:\n%r\n.', criteria)
-        return None
-
-    def _evaluate_rules(self, rules, vim_info):
-        """Given a ``vim_info`` dict from a ``instance_net`` record, evaluate
-        the set of rules provided during the VIM/datacenter registration to
-        determine an external port used to connect that VIM/datacenter to
-        other ones where different parts of the NS will be instantiated.
-
-        For example, considering a VIM/datacenter is registered like the
-        following::
-
-            vim_record = {
-              "uuid": ...
-              ...  # Other properties associated with the VIM/datacenter
-              "config": {
-                ...  # Other configuration
-                "external_connections": [
-                  {
-                    "condition": {
-                      "provider:physical_network": "provider_net1",
-                      ...  # This method will look up all the keys listed here
-                           # in the instance_nets.vim_info dict and compare the
-                           # values. When all the values match, the associated
-                           # vim_external_port will be selected.
-                    },
-                    "vim_external_port": {"switch": "switchA", "port": "portB"}
-                  },
-                  ...  # The user can provide as many rules as needed, however
-                       # only the first one to match will be applied.
-                ]
-              }
-            }
-
-        When an ``instance_net`` record is instantiated in that datacenter with
-        the following information::
-
-            instance_net = {
-              "uuid": ...
-              ...
-              "vim_info": {
-                ...
-                "provider_physical_network": "provider_net1",
-              }
-            }
-
-        Then, ``switchA`` and ``portB`` will be used to stablish the WAN
-        connection.
-
-        Arguments:
-            rules (list): Set of dicts containing the keys ``condition`` and
-                ``vim_external_port``. This list should be extracted from
-                ``vim['config']['external_connections']`` (as stored in the
-                database).
-            vim_info (dict): Information given by the VIM Connector, against
-               which the rules will be evaluated.
-
-        Returns:
-            tuple: switch id (local datacenter switch) and port or None if
-                the rule does not match.
-        """
-        rule = next((r for r in rules if self._evaluate_rule(r, vim_info)), {})
-        if 'vim_external_port' not in rule:
-            self.logger.debug('No external port found.\n'
-                              'rules:\n%r\nvim_info:\n%r\n\n', rules, vim_info)
-            return None
-
-        return (rule['vim_external_port']['switch'],
-                rule['vim_external_port']['port'])
-
-    @staticmethod
-    def _evaluate_rule(rule, vim_info):
-        """Evaluate the conditions from a single rule to ``vim_info`` and
-        determine if the rule should be applicable or not.
-
-        Please check :obj:`~._evaluate_rules` for more information.
-
-        Arguments:
-            rule (dict): Data structure containing the keys ``condition`` and
-                ``vim_external_port``. This should be one of the elements in
-                ``vim['config']['external_connections']`` (as stored in the
-                database).
-            vim_info (dict): Information given by the VIM Connector, against
-               which the rules will be evaluated.
-
-        Returns:
-            True or False: If all the conditions are met.
-        """
-        condition = rule.get('condition', {}) or {}
-        return all(safe_get(vim_info, k) == v for k, v in condition.items())
-
-    @staticmethod
-    def _derive_connection_point(wan_info):
-        point = {'service_endpoint_id': wan_info['wan_service_endpoint_id']}
-        # TODO: Cover other scenarios, e.g. VXLAN.
-        details = wan_info.get('wan_service_mapping_info', {})
-        if details.get('encapsulation_type') == 'vlan':
-            point['service_endpoint_encapsulation_type'] = 'dot1q'
-            point['service_endpoint_encapsulation_info'] = {
-                'vlan': details['encapsulation_id']
-            }
-        else:
-            point['service_endpoint_encapsulation_type'] = 'none'
-        return point
-
-    @staticmethod
-    def _derive_service_type(connection_points):
-        # TODO: add multipoint and L3 connectivity.
-        if len(connection_points) == 2:
-            return 'ELINE'
-        else:
-            raise NotImplementedError('Multipoint connectivity is not '
-                                      'supported yet.')
-
-    def _update_persistent_data(self, persistence, service_uuid, conn_info):
-        """Store plugin/connector specific information in the database"""
-        persistence.update_wan_link(self.item_id, {
-            'wim_internal_id': service_uuid,
-            'wim_info': {'conn_info': conn_info},
-            'status': 'BUILD'})
-
-    def execute(self, connector, persistence, ovim, instance_nets):
-        """Actually execute the action, since now we are sure all the
-        dependencies are solved
-        """
-        try:
-            wan_info = (self._get_connection_point_info(persistence, ovim, net)
-                        for net in instance_nets)
-            connection_points = [self._derive_connection_point(w)
-                                 for w in wan_info]
-
-            uuid, info = connector.create_connectivity_service(
-                self._derive_service_type(connection_points),
-                connection_points
-                # TODO: other properties, e.g. bandwidth
-            )
-        except (WimConnectorError, InconsistentState,
-                NoExternalPortFound) as ex:
-            self.logger.exception(ex)
-            return self.fail(
-                persistence,
-                'Impossible to stablish WAN connectivity.\n\t{}'.format(ex))
-
-        self.logger.debug('WAN connectivity established %s\n%s\n',
-                          uuid, json.dumps(info, indent=4))
-        self.wim_internal_id = uuid
-        self._update_persistent_data(persistence, uuid, info)
-        self.succeed(persistence)
-        return uuid
-
-
-class WanLinkDelete(DeleteAction):
-    def succeed(self, persistence):
-        try:
-            persistence.update_wan_link(self.item_id, {'status': 'DELETED'})
-        except NoRecordFound:
-            self.logger.debug('%s(%s) record already deleted',
-                              self.item, self.item_id)
-
-        return super(WanLinkDelete, self).succeed(persistence)
-
-    def get_wan_link(self, persistence):
-        """Retrieve information about the wan_link
-
-        It might be cached, or arrive from the database
-        """
-        if self.extra.get('wan_link'):
-            # First try a cached version of the data
-            return self.extra['wan_link']
-
-        return persistence.get_by_uuid(
-            'instance_wim_nets', self.item_id)
-
-    def process(self, connector, persistence, ovim):
-        """Delete a WAN link previously created"""
-        wan_link = self.get_wan_link(persistence)
-        if 'ERROR' in (wan_link.get('status') or ''):
-            return self.fail(
-                persistence,
-                'Impossible to delete WAN connectivity, '
-                'it was never successfully established:'
-                '\n\t{}'.format(wan_link['error_msg']))
-
-        internal_id = wan_link.get('wim_internal_id') or self.internal_id
-
-        if not internal_id:
-            self.logger.debug('No wim_internal_id found in\n%s\n%s\n'
-                              'Assuming no network was created yet, '
-                              'so no network have to be deleted.',
-                              json.dumps(wan_link, indent=4),
-                              json.dumps(self.as_dict(), indent=4))
-            return self.succeed(persistence)
-
-        try:
-            id = self.wim_internal_id
-            conn_info = safe_get(wan_link, 'wim_info.conn_info')
-            self.logger.debug('Connection Service %s (wan_link: %s):\n%s\n',
-                              id, wan_link['uuid'],
-                              json.dumps(conn_info, indent=4))
-            result = connector.delete_connectivity_service(id, conn_info)
-        except (WimConnectorError, InconsistentState) as ex:
-            self.logger.exception(ex)
-            return self.fail(
-                persistence,
-                'Impossible to delete WAN connectivity.\n\t{}'.format(ex))
-
-        self.logger.debug('WAN connectivity removed %s', result)
-        self.succeed(persistence)
-
-        return result
-
-
-class WanLinkFind(RefreshMixin, FindAction):
-    pass
-
-
-ACTIONS = {
-    'CREATE': WanLinkCreate,
-    'DELETE': WanLinkDelete,
-    'FIND': WanLinkFind,
-}
diff --git a/osm_ro/wim/wim_thread.py b/osm_ro/wim/wim_thread.py
deleted file mode 100644 (file)
index f37aba7..0000000
+++ /dev/null
@@ -1,442 +0,0 @@
-# -*- coding: utf-8 -*-
-##
-# Copyright 2018 University of Bristol - High Performance Networks Research
-# Group
-# All Rights Reserved.
-#
-# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
-# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: <highperformance-networks@bristol.ac.uk>
-#
-# Neither the name of the University of Bristol nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# This work has been performed in the context of DCMS UK 5G Testbeds
-# & Trials Programme and in the framework of the Metro-Haul project -
-# funded by the European Commission under Grant number 761727 through the
-# Horizon 2020 and 5G-PPP programmes.
-##
-
-"""
-Thread-based interaction with WIMs. Tasks are stored in the
-database (vim_wim_actions table) and processed sequentially
-
-Please check the Action class for information about the content of each action.
-"""
-
-import logging
-import threading
-from contextlib import contextmanager
-from functools import partial
-from itertools import islice, chain, takewhile
-from operator import itemgetter, attrgetter
-from sys import exc_info
-from time import time, sleep
-
-from six import reraise
-from six.moves import queue
-
-from . import wan_link_actions
-from ..utils import ensure, partition, pipe
-from .actions import IGNORE, PENDING, REFRESH
-from .errors import (
-    DbBaseException,
-    QueueFull,
-    InvalidParameters as Invalid,
-    UndefinedAction,
-)
-from .failing_connector import FailingConnector
-from .wimconn import WimConnectorError
-from .wimconn_dynpac import DynpacConnector
-from .wimconn_fake import FakeConnector
-from .wimconn_ietfl2vpn import WimconnectorIETFL2VPN
-
-ACTIONS = {
-    'instance_wim_nets': wan_link_actions.ACTIONS
-}
-
-CONNECTORS = {
-    # "odl": wimconn_odl.OdlConnector,
-    "dynpac": DynpacConnector,
-    "fake": FakeConnector,
-    "tapi": WimconnectorIETFL2VPN,
-    # Add extra connectors here
-}
-
-
-class WimThread(threading.Thread):
-    """Specialized task queue implementation that runs in an isolated thread.
-
-    Objects of this class have a few methods that are intended to be used
-    outside of the thread:
-
-    - start
-    - insert_task
-    - reload
-    - exit
-
-    All the other methods are used internally to manipulate/process the task
-    queue.
-    """
-    RETRY_SCHEDULED = 10  # 10 seconds
-    REFRESH_BUILD = 10    # 10 seconds
-    REFRESH_ACTIVE = 60   # 1 minute
-    BATCH = 10            # 10 actions per round
-    QUEUE_SIZE = 2000
-    RECOVERY_TIME = 5     # Sleep 5s to leave the system some time to recover
-    MAX_RECOVERY_TIME = 180
-    WAITING_TIME = 1      # Wait 1s for taks to arrive, when there are none
-
-    def __init__(self, persistence, wim_account, logger=None, ovim=None):
-        """Init a thread.
-
-        Arguments:
-            persistence: Database abstraction layer
-            wim_account: Record containing wim_account, tenant and wim
-                information.
-        """
-        name = '{}.{}.{}'.format(wim_account['wim']['name'],
-                                 wim_account['name'], wim_account['uuid'])
-        super(WimThread, self).__init__(name=name)
-
-        self.name = name
-        self.connector = None
-        self.wim_account = wim_account
-
-        self.logger = logger or logging.getLogger('openmano.wim.'+self.name)
-        self.persist = persistence
-        self.ovim = ovim
-
-        self.task_queue = queue.Queue(self.QUEUE_SIZE)
-
-        self.refresh_tasks = []
-        """Time ordered task list for refreshing the status of WIM nets"""
-
-        self.pending_tasks = []
-        """Time ordered task list for creation, deletion of WIM nets"""
-
-        self.grouped_tasks = {}
-        """ It contains all the creation/deletion pending tasks grouped by
-        its concrete vm, net, etc
-
-            <item><item_id>:
-                -   <task1>  # e.g. CREATE task
-                    <task2>  # e.g. DELETE task
-        """
-
-        self._insert_task = {
-            PENDING: partial(self.schedule, list_name='pending'),
-            REFRESH: partial(self.schedule, list_name='refresh'),
-            IGNORE: lambda task, *_, **__: task.save(self.persist)}
-        """Send the task to the right processing queue"""
-
-    def on_start(self):
-        """Run a series of procedures every time the thread (re)starts"""
-        self.connector = self.get_connector()
-        self.reload_actions()
-
-    def get_connector(self):
-        """Create an WimConnector instance according to the wim.type"""
-        error_msg = ''
-        account_id = self.wim_account['uuid']
-        try:
-            account = self.persist.get_wim_account_by(
-                uuid=account_id, hide=None)  # Credentials need to be available
-            wim = account['wim']
-            mapping = self.persist.query('wim_port_mappings',
-                                         WHERE={'wim_id': wim['uuid']},
-                                         error_if_none=False)
-            return CONNECTORS[wim['type']](wim, account, {
-                'service_endpoint_mapping': mapping or []
-            })
-        except DbBaseException as ex:
-            error_msg = ('Error when retrieving WIM account ({})\n'
-                         .format(account_id)) + str(ex)
-            self.logger.error(error_msg, exc_info=True)
-        except KeyError as ex:
-            error_msg = ('Unable to find the WIM connector for WIM ({})\n'
-                         .format(wim['type'])) + str(ex)
-            self.logger.error(error_msg, exc_info=True)
-        except (WimConnectorError, Exception) as ex:
-            # TODO: Remove the Exception class here when the connector class is
-            # ready
-            error_msg = ('Error when loading WIM connector for WIM ({})\n'
-                         .format(wim['type'])) + str(ex)
-            self.logger.error(error_msg, exc_info=True)
-
-        error_msg_extra = ('Any task targeting WIM account {} ({}) will fail.'
-                           .format(account_id, self.wim_account.get('name')))
-        self.logger.warning(error_msg_extra)
-        return FailingConnector(error_msg + '\n' + error_msg_extra)
-
-    @contextmanager
-    def avoid_exceptions(self):
-        """Make a real effort to keep the thread alive, by avoiding the
-        exceptions. They are instead logged as a critical errors.
-        """
-        try:
-            yield
-        except Exception as ex:
-            self.logger.critical("Unexpected exception %s", ex, exc_info=True)
-            sleep(self.RECOVERY_TIME)
-
-    def reload_actions(self, group_limit=100):
-        """Read actions from database and reload them at memory.
-
-        This method will clean and reload the attributes ``refresh_tasks``,
-        ``pending_tasks`` and ``grouped_tasks``
-
-        Attributes:
-            group_limit (int): maximum number of action groups (those that
-                refer to the same ``<item, item_id>``) to be retrieved from the
-                database in each batch.
-        """
-
-        # First we clean the cache to let the garbage collector work
-        self.refresh_tasks = []
-        self.pending_tasks = []
-        self.grouped_tasks = {}
-
-        offset = 0
-
-        while True:
-            # Do things in batches
-            task_groups = self.persist.get_actions_in_groups(
-                self.wim_account['uuid'], item_types=('instance_wim_nets',),
-                group_offset=offset, group_limit=group_limit)
-            offset += (group_limit - 1)  # Update for the next batch
-
-            if not task_groups:
-                break
-
-            pending_groups = (g for _, g in task_groups if is_pending_group(g))
-
-            for task_list in pending_groups:
-                with self.avoid_exceptions():
-                    self.insert_pending_tasks(filter_pending_tasks(task_list))
-
-            self.logger.debug(
-                'Reloaded wim actions pending: %d refresh: %d',
-                len(self.pending_tasks), len(self.refresh_tasks))
-
-    def insert_pending_tasks(self, task_list):
-        """Insert task in the list of actions being processed"""
-        task_list = [action_from(task, self.logger) for task in task_list]
-
-        for task in task_list:
-            group = task.group_key
-            self.grouped_tasks.setdefault(group, [])
-            # Each task can try to supersede the other ones,
-            # but just DELETE actions will actually do
-            task.supersede(self.grouped_tasks[group])
-            self.grouped_tasks[group].append(task)
-
-        # We need a separate loop so each task can check all the other
-        # ones before deciding
-        for task in task_list:
-            self._insert_task[task.processing](task)
-            self.logger.debug('Insert WIM task: %s (%s): %s %s',
-                              task.id, task.status, task.action, task.item)
-
-    def schedule(self, task, when=None, list_name='pending'):
-        """Insert a task in the correct list, respecting the schedule.
-        The refreshing list is ordered by threshold_time (task.process_at)
-        It is assumed that this is called inside this thread
-
-        Arguments:
-            task (Action): object representing the task.
-                This object must implement the ``process`` method and inherit
-                from the ``Action`` class
-            list_name: either 'refresh' or 'pending'
-            when (float): unix time in seconds since as a float number
-        """
-        processing_list = {'refresh': self.refresh_tasks,
-                           'pending': self.pending_tasks}[list_name]
-
-        when = when or time()
-        task.process_at = when
-
-        schedule = (t.process_at for t in processing_list)
-        index = len(list(takewhile(lambda moment: moment <= when, schedule)))
-
-        processing_list.insert(index, task)
-        self.logger.debug(
-            'Schedule of %s in "%s" - waiting position: %d (%f)',
-            task.id, list_name, index, task.process_at)
-
-        return task
-
-    def process_list(self, list_name='pending'):
-        """Process actions in batches and reschedule them if necessary"""
-        task_list, handler = {
-            'refresh': (self.refresh_tasks, self._refresh_single),
-            'pending': (self.pending_tasks, self._process_single)}[list_name]
-
-        now = time()
-        waiting = ((i, task) for i, task in enumerate(task_list)
-                   if task.process_at is None or task.process_at <= now)
-
-        is_superseded = pipe(itemgetter(1), attrgetter('is_superseded'))
-        superseded, active = partition(is_superseded, waiting)
-        superseded = [(i, t.save(self.persist)) for i, t in superseded]
-
-        batch = islice(active, self.BATCH)
-        refreshed = [(i, handler(t)) for i, t in batch]
-
-        # Since pop changes the indexes in the list, we need to do it backwards
-        remove = sorted([i for i, _ in chain(refreshed, superseded)])
-        return len([task_list.pop(i) for i in reversed(remove)])
-
-    def _refresh_single(self, task):
-        """Refresh just a single task, and reschedule it if necessary"""
-        now = time()
-
-        result = task.refresh(self.connector, self.persist)
-        self.logger.debug('Refreshing WIM task: %s (%s): %s %s => %r',
-                          task.id, task.status, task.action, task.item, result)
-
-        interval = self.REFRESH_BUILD if task.is_build else self.REFRESH_ACTIVE
-        self.schedule(task, now + interval, 'refresh')
-
-        return result
-
-    def _process_single(self, task):
-        """Process just a single task, and reschedule it if necessary"""
-        now = time()
-
-        result = task.process(self.connector, self.persist, self.ovim)
-        self.logger.debug('Executing WIM task: %s (%s): %s %s => %r',
-                          task.id, task.status, task.action, task.item, result)
-
-        if task.action == 'DELETE':
-            del self.grouped_tasks[task.group_key]
-
-        self._insert_task[task.processing](task, now + self.RETRY_SCHEDULED)
-
-        return result
-
-    def insert_task(self, task):
-        """Send a message to the running thread
-
-        This function is supposed to be called outside of the WIM Thread.
-
-        Arguments:
-            task (str or dict): `"exit"`, `"reload"` or dict representing a
-                task. For more information about the fields in task, please
-                check the Action class.
-        """
-        try:
-            self.task_queue.put(task, False)
-            return None
-        except queue.Full:
-            ex = QueueFull(self.name)
-            reraise(ex.__class__, ex, exc_info()[2])
-
-    def reload(self):
-        """Send a message to the running thread to reload itself"""
-        self.insert_task('reload')
-
-    def exit(self):
-        """Send a message to the running thread to kill itself"""
-        self.insert_task('exit')
-
-    def run(self):
-        self.logger.debug('Starting: %s', self.name)
-        recovery_time = 0
-        while True:
-            self.on_start()
-            reload_thread = False
-            self.logger.debug('Reloaded: %s', self.name)
-
-            while True:
-                with self.avoid_exceptions():
-                    while not self.task_queue.empty():
-                        task = self.task_queue.get()
-                        if isinstance(task, dict):
-                            self.insert_pending_tasks([task])
-                        elif isinstance(task, list):
-                            self.insert_pending_tasks(task)
-                        elif isinstance(task, str):
-                            if task == 'exit':
-                                self.logger.debug('Finishing: %s', self.name)
-                                return 0
-                            elif task == 'reload':
-                                reload_thread = True
-                                break
-                        self.task_queue.task_done()
-
-                    if reload_thread:
-                        break
-
-                    if not(self.process_list('pending') +
-                           self.process_list('refresh')):
-                        sleep(self.WAITING_TIME)
-
-                    if isinstance(self.connector, FailingConnector):
-                        # Wait sometime to try instantiating the connector
-                        # again and restart
-                        # Increase the recovery time if restarting is not
-                        # working (up to a limit)
-                        recovery_time = min(self.MAX_RECOVERY_TIME,
-                                            recovery_time + self.RECOVERY_TIME)
-                        sleep(recovery_time)
-                        break
-                    else:
-                        recovery_time = 0
-
-        self.logger.debug("Finishing")
-
-
-def is_pending_group(group):
-    return all(task['action'] != 'DELETE' or
-               task['status'] == 'SCHEDULED'
-               for task in group)
-
-
-def filter_pending_tasks(group):
-    return (t for t in group
-            if (t['status'] == 'SCHEDULED' or
-                t['action'] in ('CREATE', 'FIND')))
-
-
-def action_from(record, logger=None, mapping=ACTIONS):
-    """Create an Action object from a action record (dict)
-
-    Arguments:
-        mapping (dict): Nested data structure that maps the relationship
-            between action properties and object constructors.  This data
-            structure should be a dict with 2 levels of keys: item type and
-            action type. Example::
-                {'wan_link':
-                    {'CREATE': WanLinkCreate}
-                    ...}
-                ...}
-        record (dict): action information
-
-    Return:
-        (Action.Base): Object representing the action
-    """
-    ensure('item' in record, Invalid('`record` should contain "item"'))
-    ensure('action' in record, Invalid('`record` should contain "action"'))
-
-    try:
-        factory = mapping[record['item']][record['action']]
-        return factory(record, logger=logger)
-    except KeyError:
-        ex = UndefinedAction(record['item'], record['action'])
-        reraise(ex.__class__, ex, exc_info()[2])
diff --git a/osm_ro/wim/wimconn.py b/osm_ro/wim/wimconn.py
deleted file mode 100644 (file)
index 92b6db0..0000000
+++ /dev/null
@@ -1,236 +0,0 @@
-# -*- coding: utf-8 -*-
-##
-# Copyright 2018 University of Bristol - High Performance Networks Research
-# Group
-# All Rights Reserved.
-#
-# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
-# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: <highperformance-networks@bristol.ac.uk>
-#
-# Neither the name of the University of Bristol nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# This work has been performed in the context of DCMS UK 5G Testbeds
-# & Trials Programme and in the framework of the Metro-Haul project -
-# funded by the European Commission under Grant number 761727 through the
-# Horizon 2020 and 5G-PPP programmes.
-##
-"""The WIM connector is responsible for establishing wide area network
-connectivity.
-
-It receives information from the WimThread/WAN Actions about the endpoints of
-a link that spans across multiple datacenters and stablish a path between them.
-"""
-import logging
-
-from ..http_tools.errors import HttpMappedError
-
-
-class WimConnectorError(HttpMappedError):
-    """Base Exception for all connector related errors"""
-
-
-class WimConnector(object):
-    """Abstract base class for all the WIM connectors
-
-    Arguments:
-        wim (dict): WIM record, as stored in the database
-        wim_account (dict): WIM account record, as stored in the database
-        config (dict): optional persistent information related to an specific
-            connector.  Inside this dict, a special key,
-            ``service_endpoint_mapping`` provides the internal endpoint
-            mapping.
-        logger (logging.Logger): optional logger object. If none is passed
-            ``openmano.wim.wimconn`` is used.
-
-    The arguments of the constructor are converted to object attributes.
-    An extra property, ``service_endpoint_mapping`` is created from ``config``.
-    """
-    def __init__(self, wim, wim_account, config=None, logger=None):
-        self.logger = logger or logging.getLogger('openmano.wim.wimconn')
-
-        self.wim = wim
-        self.wim_account = wim_account
-        self.config = config or {}
-        self.service_endpoint_mapping = (
-            config.get('service_endpoint_mapping', []))
-
-    def check_credentials(self):
-        """Check if the connector itself can access the WIM.
-
-        Raises:
-            WimConnectorError: Issues regarding authorization, access to
-                external URLs, etc are detected.
-        """
-        raise NotImplementedError
-
-    def get_connectivity_service_status(self, service_uuid, conn_info=None):
-        """Monitor the status of the connectivity service established
-
-        Arguments:
-            service_uuid (str): UUID of the connectivity service
-            conn_info (dict or None): Information returned by the connector
-                during the service creation/edition and subsequently stored in
-                the database.
-
-        Returns:
-            dict: JSON/YAML-serializable dict that contains a mandatory key
-                ``wim_status`` associated with one of the following values::
-
-                    {'wim_status': 'ACTIVE'}
-                        # The service is up and running.
-
-                    {'wim_status': 'INACTIVE'}
-                        # The service was created, but the connector
-                        # cannot determine yet if connectivity exists
-                        # (ideally, the caller needs to wait and check again).
-
-                    {'wim_status': 'DOWN'}
-                        # Connection was previously established,
-                        # but an error/failure was detected.
-
-                    {'wim_status': 'ERROR'}
-                        # An error occurred when trying to create the service/
-                        # establish the connectivity.
-
-                    {'wim_status': 'BUILD'}
-                        # Still trying to create the service, the caller
-                        # needs to wait and check again.
-
-                Additionally ``error_msg``(**str**) and ``wim_info``(**dict**)
-                keys can be used to provide additional status explanation or
-                new information available for the connectivity service.
-        """
-        raise NotImplementedError
-
-    def create_connectivity_service(self, service_type, connection_points,
-                                    **kwargs):
-        """Stablish WAN connectivity between the endpoints
-
-        Arguments:
-            service_type (str): ``ELINE`` (L2), ``ELAN`` (L2), ``ETREE`` (L2),
-                ``L3``.
-            connection_points (list): each point corresponds to
-                an entry point from the DC to the transport network. One
-                connection point serves to identify the specific access and
-                some other service parameters, such as encapsulation type.
-                Represented by a dict as follows::
-
-                    {
-                      "service_endpoint_id": ..., (str[uuid])
-                      "service_endpoint_encapsulation_type": ...,
-                           (enum: none, dot1q, ...)
-                      "service_endpoint_encapsulation_info": {
-                        ... (dict)
-                        "vlan": ..., (int, present if encapsulation is dot1q)
-                        "vni": ... (int, present if encapsulation is vxlan),
-                        "peers": [(ipv4_1), (ipv4_2)]
-                            (present if encapsulation is vxlan)
-                      }
-                    }
-
-              The service endpoint ID should be previously informed to the WIM
-              engine in the RO when the WIM port mapping is registered.
-
-        Keyword Arguments:
-            bandwidth (int): value in kilobytes
-            latency (int): value in milliseconds
-
-        Other QoS might be passed as keyword arguments.
-
-        Returns:
-            tuple: ``(service_id, conn_info)`` containing:
-               - *service_uuid* (str): UUID of the established connectivity
-                  service
-               - *conn_info* (dict or None): Information to be stored at the
-                 database (or ``None``). This information will be provided to
-                 the :meth:`~.edit_connectivity_service` and :obj:`~.delete`.
-                 **MUST** be JSON/YAML-serializable (plain data structures).
-
-        Raises:
-            WimConnectorException: In case of error.
-        """
-        raise NotImplementedError
-
-    def delete_connectivity_service(self, service_uuid, conn_info=None):
-        """Disconnect multi-site endpoints previously connected
-
-        This method should receive as arguments both the UUID and the
-        connection info dict (respectively), as returned by
-        :meth:`~.create_connectivity_service` and
-        :meth:`~.edit_connectivity_service`.
-
-        Arguments:
-            service_uuid (str): UUID of the connectivity service
-            conn_info (dict or None): Information returned by the connector
-                during the service creation and subsequently stored in the
-                database.
-
-        Raises:
-            WimConnectorException: In case of error.
-        """
-        raise NotImplementedError
-
-    def edit_connectivity_service(self, service_uuid, conn_info=None,
-                                  connection_points=None, **kwargs):
-        """Change an existing connectivity service.
-
-        This method's arguments and return value follow the same convention as
-        :meth:`~.create_connectivity_service`.
-
-        Arguments:
-            service_uuid (str): UUID of the connectivity service.
-            conn_info (dict or None): Information previously stored in the
-                database.
-            connection_points (list): If provided, the old list of connection
-                points will be replaced.
-
-        Returns:
-            dict or None: Information to be updated and stored at the
-                database.
-                When ``None`` is returned, no information should be changed.
-                When an empty dict is returned, the database record will be
-                deleted.
-                **MUST** be JSON/YAML-serializable (plain data structures).
-
-        Raises:
-            WimConnectorException: In case of error.
-        """
-        raise NotImplementedError
-
-    def clear_all_connectivity_services(self):
-        """Delete all WAN Links in a WIM.
-
-        This method is intended for debugging only, and should delete all the
-        connections controlled by the WIM, not only the WIM connections that
-        a specific RO is aware of.
-
-        Raises:
-            WimConnectorException: In case of error.
-        """
-        raise NotImplementedError
-
-    def get_all_active_connectivity_services(self):
-        """Provide information about all active connections provisioned by a
-        WIM.
-
-        Raises:
-            WimConnectorException: In case of error.
-        """
-        raise NotImplementedError
diff --git a/osm_ro/wim/wimconn_dynpac.py b/osm_ro/wim/wimconn_dynpac.py
deleted file mode 100644 (file)
index cc9376b..0000000
+++ /dev/null
@@ -1,235 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2018 David García, University of the Basque Country
-# Copyright 2018 University of the Basque Country
-# This file is part of openmano
-# All Rights Reserved.
-# Contact information at http://i2t.ehu.eus
-#
-# # Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import requests
-import json
-import logging
-from enum import Enum
-
-from wimconn import WimConnector, WimConnectorError
-
-
-class WimError(Enum):
-    UNREACHABLE = 'Unable to reach the WIM.',
-    SERVICE_TYPE_ERROR = 'Unexpected service_type. Only "L2" is accepted.',
-    CONNECTION_POINTS_SIZE = \
-        'Unexpected number of connection points: 2 expected.',
-    ENCAPSULATION_TYPE = \
-        'Unexpected service_endpoint_encapsulation_type. \
-         Only "dotq1" is accepted.',
-    BANDWIDTH = 'Unable to get the bandwidth.',
-    STATUS = 'Unable to get the status for the service.',
-    DELETE = 'Unable to delete service.',
-    CLEAR_ALL = 'Unable to clear all the services',
-    UNKNOWN_ACTION = 'Unknown action invoked.',
-    BACKUP = 'Unable to get the backup parameter.',
-    UNSUPPORTED_FEATURE = "Unsupported feature",
-    UNAUTHORIZED = "Failed while authenticating"
-
-
-class WimAPIActions(Enum):
-    CHECK_CONNECTIVITY = "CHECK_CONNECTIVITY",
-    CREATE_SERVICE = "CREATE_SERVICE",
-    DELETE_SERVICE = "DELETE_SERVICE",
-    CLEAR_ALL = "CLEAR_ALL",
-    SERVICE_STATUS = "SERVICE_STATUS",
-
-
-class DynpacConnector(WimConnector):
-    __supported_service_types = ["ELINE (L2)", "ELINE"]
-    __supported_encapsulation_types = ["dot1q"]
-    __WIM_LOGGER = 'openmano.wimconn.dynpac'
-    __ENCAPSULATION_TYPE_PARAM = "service_endpoint_encapsulation_type"
-    __ENCAPSULATION_INFO_PARAM = "service_endpoint_encapsulation_info"
-    __BACKUP_PARAM = "backup"
-    __BANDWIDTH_PARAM = "bandwidth"
-    __SERVICE_ENDPOINT_PARAM = "service_endpoint_id"
-    __WAN_SERVICE_ENDPOINT_PARAM = "wan_service_endpoint_id"
-    __WAN_MAPPING_INFO_PARAM = "wan_service_mapping_info"
-    __SW_ID_PARAM = "wan_switch_dpid"
-    __SW_PORT_PARAM = "wan_switch_port"
-    __VLAN_PARAM = "vlan"
-
-    # Public functions exposed to the Resource Orchestrator
-    def __init__(self, wim, wim_account, config):
-        self.logger = logging.getLogger(self.__WIM_LOGGER)
-        self.__wim = wim
-        self.__wim_account = wim_account
-        self.__config = config
-        self.__wim_url = self.__wim.get("wim_url")
-        self.__user = wim_account.get("user")
-        self.__passwd = wim_account.get("passwd")
-        self.logger.info("Initialized.")
-
-    def create_connectivity_service(self,
-                                    service_type,
-                                    connection_points,
-                                    **kwargs):
-        self.__check_service(service_type, connection_points, kwargs)
-
-        body = self.__get_body(service_type, connection_points, kwargs)
-
-        headers = {'Content-type': 'application/x-www-form-urlencoded'}
-        endpoint = "{}/service/create".format(self.__wim_url)
-
-        try:
-            response = requests.post(endpoint, data=body, headers=headers)
-        except requests.exceptions.RequestException as e:
-            self.__exception(e.message, http_code=503)
-
-        if response.status_code != 200:
-            error = json.loads(response.content)
-            reason = "Reason: {}. ".format(error.get("code"))
-            description = "Description: {}.".format(error.get("description"))
-            exception = reason + description
-            self.__exception(exception, http_code=response.status_code)
-        uuid = response.content
-        self.logger.info("Service with uuid {} created.".format(uuid))
-        return (uuid, None)
-
-    def edit_connectivity_service(self, service_uuid,
-                                  conn_info, connection_points,
-                                  **kwargs):
-        self.__exception(WimError.UNSUPPORTED_FEATURE, http_code=501)
-
-    def get_connectivity_service_status(self, service_uuid):
-        endpoint = "{}/service/status/{}".format(self.__wim_url, service_uuid)
-        try:
-            response = requests.get(endpoint)
-        except requests.exceptions.RequestException as e:
-            self.__exception(e.message, http_code=503)
-
-        if response.status_code != 200:
-            self.__exception(WimError.STATUS, http_code=response.status_code)
-        self.logger.info("Status for service with uuid {}: {}"
-                         .format(service_uuid, response.content))
-        return response.content
-
-    def delete_connectivity_service(self, service_uuid, conn_info):
-        endpoint = "{}/service/delete/{}".format(self.__wim_url, service_uuid)
-        try:
-            response = requests.delete(endpoint)
-        except requests.exceptions.RequestException as e:
-            self.__exception(e.message, http_code=503)
-        if response.status_code != 200:
-            self.__exception(WimError.DELETE, http_code=response.status_code)
-
-        self.logger.info("Service with uuid: {} deleted".format(service_uuid))
-
-    def clear_all_connectivity_services(self):
-        endpoint = "{}/service/clearAll".format(self.__wim_url)
-        try:
-            response = requests.delete(endpoint)
-            http_code = response.status_code
-        except requests.exceptions.RequestException as e:
-            self.__exception(e.message, http_code=503)
-        if http_code != 200:
-            self.__exception(WimError.CLEAR_ALL, http_code=http_code)
-
-        self.logger.info("{} services deleted".format(response.content))
-        return "{} services deleted".format(response.content)
-
-    def check_connectivity(self):
-        endpoint = "{}/checkConnectivity".format(self.__wim_url)
-
-        try:
-            response = requests.get(endpoint)
-            http_code = response.status_code
-        except requests.exceptions.RequestException as e:
-            self.__exception(e.message, http_code=503)
-
-        if http_code != 200:
-            self.__exception(WimError.UNREACHABLE, http_code=http_code)
-        self.logger.info("Connectivity checked")
-
-    def check_credentials(self):
-        endpoint = "{}/checkCredentials".format(self.__wim_url)
-        auth = (self.__user, self.__passwd)
-
-        try:
-            response = requests.get(endpoint, auth=auth)
-            http_code = response.status_code
-        except requests.exceptions.RequestException as e:
-            self.__exception(e.message, http_code=503)
-
-        if http_code != 200:
-            self.__exception(WimError.UNAUTHORIZED, http_code=http_code)
-        self.logger.info("Credentials checked")
-
-    # Private functions
-    def __exception(self, x, **kwargs):
-        http_code = kwargs.get("http_code")
-        if hasattr(x, "value"):
-            error = x.value
-        else:
-            error = x
-        self.logger.error(error)
-        raise WimConnectorError(error, http_code=http_code)
-
-    def __check_service(self, service_type, connection_points, kwargs):
-        if service_type not in self.__supported_service_types:
-            self.__exception(WimError.SERVICE_TYPE_ERROR, http_code=400)
-
-        if len(connection_points) != 2:
-            self.__exception(WimError.CONNECTION_POINTS_SIZE, http_code=400)
-
-        for connection_point in connection_points:
-            enc_type = connection_point.get(self.__ENCAPSULATION_TYPE_PARAM)
-            if enc_type not in self.__supported_encapsulation_types:
-                self.__exception(WimError.ENCAPSULATION_TYPE, http_code=400)
-
-        # Commented out for as long as parameter isn't implemented
-        # bandwidth = kwargs.get(self.__BANDWIDTH_PARAM)
-        # if not isinstance(bandwidth, int):
-            # self.__exception(WimError.BANDWIDTH, http_code=400)
-
-        # Commented out for as long as parameter isn't implemented
-        # backup = kwargs.get(self.__BACKUP_PARAM)
-        # if not isinstance(backup, bool):
-            # self.__exception(WimError.BACKUP, http_code=400)
-
-    def __get_body(self, service_type, connection_points, kwargs):
-        port_mapping = self.__config.get("service_endpoint_mapping")
-        selected_ports = []
-        for connection_point in connection_points:
-            endpoint_id = connection_point.get(self.__SERVICE_ENDPOINT_PARAM)
-            port = filter(lambda x: x.get(self.__WAN_SERVICE_ENDPOINT_PARAM) == endpoint_id, port_mapping)[0]
-            port_info = port.get(self.__WAN_MAPPING_INFO_PARAM)
-            selected_ports.append(port_info)
-        if service_type == "ELINE (L2)" or service_type == "ELINE":
-            service_type = "L2"
-        body = {
-            "connection_points": [{
-                "wan_switch_dpid": selected_ports[0].get(self.__SW_ID_PARAM),
-                "wan_switch_port": selected_ports[0].get(self.__SW_PORT_PARAM),
-                "wan_vlan": connection_points[0].get(self.__ENCAPSULATION_INFO_PARAM).get(self.__VLAN_PARAM)
-            }, {
-                "wan_switch_dpid": selected_ports[1].get(self.__SW_ID_PARAM),
-                "wan_switch_port": selected_ports[1].get(self.__SW_PORT_PARAM),
-                "wan_vlan": connection_points[1].get(self.__ENCAPSULATION_INFO_PARAM).get(self.__VLAN_PARAM)
-            }],
-            "bandwidth": 100,  # Hardcoded for as long as parameter isn't implemented
-            "service_type": service_type,
-            "backup": False    # Hardcoded for as long as parameter isn't implemented
-        }
-        return "body={}".format(json.dumps(body))
diff --git a/osm_ro/wim/wimconn_fake.py b/osm_ro/wim/wimconn_fake.py
deleted file mode 100644 (file)
index 36929f4..0000000
+++ /dev/null
@@ -1,137 +0,0 @@
-# -*- coding: utf-8 -*-
-##
-# Copyright 2018 Telefonica
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-This WIM does nothing and allows using it for testing and when no WIM is needed
-"""
-
-import logging
-from uuid import uuid4
-from .wimconn import WimConnector
-
-__author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
-
-
-class FakeConnector(WimConnector):
-    """Abstract base class for all the WIM connectors
-
-    Arguments:
-        wim (dict): WIM record, as stored in the database
-        wim_account (dict): WIM account record, as stored in the database
-        config (dict): optional persistent information related to an specific
-            connector.  Inside this dict, a special key,
-            ``service_endpoint_mapping`` provides the internal endpoint
-            mapping.
-        logger (logging.Logger): optional logger object. If none is passed
-            ``openmano.wim.wimconn`` is used.
-
-    The arguments of the constructor are converted to object attributes.
-    An extra property, ``service_endpoint_mapping`` is created from ``config``.
-    """
-    def __init__(self, wim, wim_account, config=None, logger=None):
-        self.logger = logging.getLogger('openmano.wimconn.fake')
-        super(FakeConnector, self).__init__(wim, wim_account, config, logger)
-        self.logger.debug("__init: wim='{}' wim_account='{}'".format(wim, wim_account))
-        self.connections = {}
-        self.counter = 0
-
-    def check_credentials(self):
-        """Check if the connector itself can access the WIM.
-
-        Raises:
-            WimConnectorError: Issues regarding authorization, access to
-                external URLs, etc are detected.
-        """
-        self.logger.debug("check_credentials")
-        return None
-
-    def get_connectivity_service_status(self, service_uuid, conn_info=None):
-        """Monitor the status of the connectivity service established
-
-        Arguments:
-            service_uuid (str): UUID of the connectivity service
-            conn_info (dict or None): Information returned by the connector
-                during the service creation/edition and subsequently stored in
-                the database.
-
-        Returns:
-            dict: JSON/YAML-serializable dict that contains a mandatory key
-                ``wim_status`` associated with one of the following values::
-
-                Additionally ``error_msg``(**str**) and ``wim_info``(**dict**)
-                keys can be used to provide additional status explanation or
-                new information available for the connectivity service.
-        """
-        self.logger.debug("get_connectivity_service_status: service_uuid='{}' conn_info='{}'".format(service_uuid,
-                                                                                                     conn_info))
-        return {'wim_status': 'ACTIVE', 'wim_info': self.connectivity.get(service_uuid)}
-
-    def create_connectivity_service(self, service_type, connection_points,
-                                    **kwargs):
-        """
-        Stablish WAN connectivity between the endpoints
-
-        """
-        self.logger.debug("create_connectivity_service: service_type='{}' connection_points='{}', kwargs='{}'".
-                          format(service_type, connection_points, kwargs))
-        _id = str(uuid4())
-        self.connectivity[_id] = {"nb": self.counter}
-        self.counter += 1
-        return _id, self.connectivity[_id]
-
-    def delete_connectivity_service(self, service_uuid, conn_info=None):
-        """Disconnect multi-site endpoints previously connected
-
-        """
-        self.logger.debug("delete_connectivity_service: service_uuid='{}' conn_info='{}'".format(service_uuid,
-                                                                                                 conn_info))
-        self.connectivity.pop(service_uuid, None)
-        return None
-
-    def edit_connectivity_service(self, service_uuid, conn_info=None,
-                                  connection_points=None, **kwargs):
-        """Change an existing connectivity service.
-
-        This method's arguments and return value follow the same convention as
-        :meth:`~.create_connectivity_service`.
-        """
-        self.logger.debug("edit_connectivity_service: service_uuid='{}' conn_info='{}', connection_points='{}'"
-                          "kwargs='{}'".format(service_uuid, conn_info, connection_points, kwargs))
-        return None
-
-    def clear_all_connectivity_services(self):
-        """Delete all WAN Links in a WIM.
-
-        This method is intended for debugging only, and should delete all the
-        connections controlled by the WIM, not only the WIM connections that
-        a specific RO is aware of.
-
-        """
-        self.logger.debug("clear_all_connectivity_services")
-        self.connectivity.clear()
-        return None
-
-    def get_all_active_connectivity_services(self):
-        """Provide information about all active connections provisioned by a
-        WIM.
-
-        Raises:
-            WimConnectorException: In case of error.
-        """
-        self.logger.debug("get_all_active_connectivity_services")
-        return self.connectivity
diff --git a/osm_ro/wim/wimconn_ietfl2vpn.py b/osm_ro/wim/wimconn_ietfl2vpn.py
deleted file mode 100644 (file)
index dc7cc97..0000000
+++ /dev/null
@@ -1,362 +0,0 @@
-# -*- coding: utf-8 -*-
-##
-# Copyright 2018 Telefonica
-# All Rights Reserved.
-#
-# Contributors: Oscar Gonzalez de Dios, Manuel Lopez Bravo, Guillermo Pajares Martin
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# This work has been performed in the context of the Metro-Haul project -
-# funded by the European Commission under Grant number 761727 through the
-# Horizon 2020 program.
-##
-"""The WIM connector is responsible for establishing wide area network
-connectivity.
-
-This WIM connector implements the standard IETF RFC 8466 "A YANG Data
- Model for Layer 2 Virtual Private Network (L2VPN) Service Delivery"
-
-It receives the endpoints and the necessary details to request
-the Layer 2 service.
-"""
-import requests
-import uuid
-import logging
-from .wimconn import WimConnector, WimConnectorError
-"""CHeck layer where we move it"""
-
-
-class WimconnectorIETFL2VPN(WimConnector):
-
-    def __init__(self, wim, wim_account, config=None, logger=None):
-        """IETF L2VPM WIM connector
-
-        Arguments: (To be completed)
-            wim (dict): WIM record, as stored in the database
-            wim_account (dict): WIM account record, as stored in the database
-        """
-        self.logger = logging.getLogger('openmano.wimconn.ietfl2vpn')
-        super(WimconnectorIETFL2VPN, self).__init__(wim, wim_account, config, logger)
-        self.headers = {'Content-Type': 'application/json'}
-        self.mappings = {m['wan_service_endpoint_id']: m
-                         for m in self.service_endpoint_mapping}
-        self.user = wim_account.get("user")
-        self.passwd = wim_account.get("passwd")
-        if self.user and self.passwd is not None:
-            self.auth = (self.user, self.passwd)
-        else:
-            self.auth = None
-        self.logger.info("IETFL2VPN Connector Initialized.")
-
-    def check_credentials(self):
-        endpoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(self.wim["wim_url"])
-        try:
-            response = requests.get(endpoint, auth=self.auth)    
-            http_code = response.status_code
-        except requests.exceptions.RequestException as e:
-            raise WimConnectorError(e.message, http_code=503)
-
-        if http_code != 200:
-            raise WimConnectorError("Failed while authenticating", http_code=http_code)
-        self.logger.info("Credentials checked")
-
-    def get_connectivity_service_status(self, service_uuid, conn_info=None):
-        """Monitor the status of the connectivity service stablished
-
-        Arguments:
-            service_uuid: Connectivity service unique identifier
-
-        Returns:
-            Examples::
-                {'wim_status': 'ACTIVE'}
-                {'wim_status': 'INACTIVE'}
-                {'wim_status': 'DOWN'}
-                {'wim_status': 'ERROR'}
-        """
-        try:
-            self.logger.info("Sending get connectivity service stuatus")
-            servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services/vpn-service={}/".format(
-                self.wim["wim_url"], service_uuid)
-            response = requests.get(servicepoint, auth=self.auth)
-            if response.status_code != requests.codes.ok:
-                raise WimConnectorError("Unable to obtain connectivity servcice status", http_code=response.status_code)
-            service_status = {'wim_status': 'ACTIVE'}
-            return service_status
-        except requests.exceptions.ConnectionError:
-            raise WimConnectorError("Request Timeout", http_code=408)
-               
-    def search_mapp(self, connection_point):
-        id = connection_point['service_endpoint_id']
-        if id not in self.mappings:         
-            raise WimConnectorError("Endpoint {} not located".format(str(id)))
-        else:
-            return self.mappings[id]
-
-    def create_connectivity_service(self, service_type, connection_points, **kwargs):
-        """Stablish WAN connectivity between the endpoints
-
-        Arguments:
-            service_type (str): ``ELINE`` (L2), ``ELAN`` (L2), ``ETREE`` (L2),
-                ``L3``.
-            connection_points (list): each point corresponds to
-                an entry point from the DC to the transport network. One
-                connection point serves to identify the specific access and
-                some other service parameters, such as encapsulation type.
-                Represented by a dict as follows::
-
-                    {
-                      "service_endpoint_id": ..., (str[uuid])
-                      "service_endpoint_encapsulation_type": ...,
-                           (enum: none, dot1q, ...)
-                      "service_endpoint_encapsulation_info": {
-                        ... (dict)
-                        "vlan": ..., (int, present if encapsulation is dot1q)
-                        "vni": ... (int, present if encapsulation is vxlan),
-                        "peers": [(ipv4_1), (ipv4_2)]
-                            (present if encapsulation is vxlan)
-                      }
-                    }
-
-              The service endpoint ID should be previously informed to the WIM
-              engine in the RO when the WIM port mapping is registered.
-
-        Keyword Arguments:
-            bandwidth (int): value in kilobytes
-            latency (int): value in milliseconds
-
-        Other QoS might be passed as keyword arguments.
-
-        Returns:
-            tuple: ``(service_id, conn_info)`` containing:
-               - *service_uuid* (str): UUID of the established connectivity
-                  service
-               - *conn_info* (dict or None): Information to be stored at the
-                 database (or ``None``). This information will be provided to
-                 the :meth:`~.edit_connectivity_service` and :obj:`~.delete`.
-                 **MUST** be JSON/YAML-serializable (plain data structures).
-
-        Raises:
-            WimConnectorException: In case of error.
-        """
-        if service_type == "ELINE":
-            if len(connection_points) > 2:
-                raise WimConnectorError('Connections between more than 2 endpoints are not supported')
-            if len(connection_points) < 2:
-                raise WimConnectorError('Connections must be of at least 2 endpoints')
-            """ First step, create the vpn service """    
-            uuid_l2vpn = str(uuid.uuid4())
-            vpn_service = {}
-            vpn_service["vpn-id"] = uuid_l2vpn
-            vpn_service["vpn-scv-type"] = "vpws"
-            vpn_service["svc-topo"] = "any-to-any"
-            vpn_service["customer-name"] = "osm"
-            vpn_service_list = []
-            vpn_service_list.append(vpn_service)
-            vpn_service_l = {"ietf-l2vpn-svc:vpn-service": vpn_service_list}
-            response_service_creation = None
-            conn_info = []
-            self.logger.info("Sending vpn-service :{}".format(vpn_service_l))
-            try:
-                endpoint_service_creation = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(
-                    self.wim["wim_url"])
-                response_service_creation = requests.post(endpoint_service_creation, headers=self.headers,
-                                                          json=vpn_service_l, auth=self.auth)
-            except requests.exceptions.ConnectionError:
-                raise WimConnectorError("Request to create service Timeout", http_code=408)
-            if response_service_creation.status_code == 409:
-                raise WimConnectorError("Service already exists", http_code=response_service_creation.status_code)
-            elif response_service_creation.status_code != requests.codes.created:
-                raise WimConnectorError("Request to create service not accepted",
-                                        http_code=response_service_creation.status_code)
-            """ Second step, create the connections and vpn attachments """   
-            for connection_point in connection_points:
-                connection_point_wan_info = self.search_mapp(connection_point)
-                site_network_access = {}
-                connection = {}
-                if connection_point["service_endpoint_encapsulation_type"] != "none":
-                    if connection_point["service_endpoint_encapsulation_type"] == "dot1q":
-                        """ The connection is a VLAN """
-                        connection["encapsulation-type"] = "dot1q-vlan-tagged"
-                        tagged = {}
-                        tagged_interf = {}
-                        service_endpoint_encapsulation_info = connection_point["service_endpoint_encapsulation_info"]
-                        if service_endpoint_encapsulation_info["vlan"] is None:
-                            raise WimConnectorError("VLAN must be provided")
-                        tagged_interf["cvlan-id"] = service_endpoint_encapsulation_info["vlan"]
-                        tagged["dot1q-vlan-tagged"] = tagged_interf
-                        connection["tagged-interface"] = tagged
-                    else:
-                        raise NotImplementedError("Encapsulation type not implemented")
-                site_network_access["connection"] = connection
-                self.logger.info("Sending connection:{}".format(connection))
-                vpn_attach = {}
-                vpn_attach["vpn-id"] = uuid_l2vpn
-                vpn_attach["site-role"] = vpn_service["svc-topo"]+"-role"
-                site_network_access["vpn-attachment"] = vpn_attach
-                self.logger.info("Sending vpn-attachement :{}".format(vpn_attach))
-                uuid_sna = str(uuid.uuid4())
-                site_network_access["network-access-id"] = uuid_sna
-                site_network_access["bearer"] = connection_point_wan_info["wan_service_mapping_info"]["bearer"]
-                site_network_accesses = {}
-                site_network_access_list = []
-                site_network_access_list.append(site_network_access)
-                site_network_accesses["ietf-l2vpn-svc:site-network-access"] = site_network_access_list
-                conn_info_d = {}
-                conn_info_d["site"] = connection_point_wan_info["wan_service_mapping_info"]["site-id"]
-                conn_info_d["site-network-access-id"] = site_network_access["network-access-id"]
-                conn_info_d["mapping"] = None
-                conn_info.append(conn_info_d)
-                try:
-                    endpoint_site_network_access_creation = \
-                        "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/sites/site={}/site-network-accesses/".format(
-                            self.wim["wim_url"], connection_point_wan_info["wan_service_mapping_info"]["site-id"])
-                    response_endpoint_site_network_access_creation = requests.post(
-                        endpoint_site_network_access_creation,
-                        headers=self.headers,
-                        json=site_network_accesses,
-                        auth=self.auth)
-                    
-                    if response_endpoint_site_network_access_creation.status_code == 409:
-                        self.delete_connectivity_service(vpn_service["vpn-id"])
-                        raise WimConnectorError("Site_Network_Access with ID '{}' already exists".format(
-                            site_network_access["network-access-id"]),
-                            http_code=response_endpoint_site_network_access_creation.status_code)
-                    
-                    elif response_endpoint_site_network_access_creation.status_code == 400:
-                        self.delete_connectivity_service(vpn_service["vpn-id"])
-                        raise WimConnectorError("Site {} does not exist".format(
-                            connection_point_wan_info["wan_service_mapping_info"]["site-id"]),
-                            http_code=response_endpoint_site_network_access_creation.status_code)
-                    
-                    elif response_endpoint_site_network_access_creation.status_code != requests.codes.created and \
-                            response_endpoint_site_network_access_creation.status_code != requests.codes.no_content:
-                        self.delete_connectivity_service(vpn_service["vpn-id"])
-                        raise WimConnectorError("Request no accepted",
-                                                http_code=response_endpoint_site_network_access_creation.status_code)
-                
-                except requests.exceptions.ConnectionError:
-                    self.delete_connectivity_service(vpn_service["vpn-id"])
-                    raise WimConnectorError("Request Timeout", http_code=408)
-            return uuid_l2vpn, conn_info
-        
-        else:
-            raise NotImplementedError
-
-    def delete_connectivity_service(self, service_uuid, conn_info=None):
-        """Disconnect multi-site endpoints previously connected
-
-        This method should receive as the first argument the UUID generated by
-        the ``create_connectivity_service``
-        """
-        try:
-            self.logger.info("Sending delete")
-            servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services/vpn-service={}/".format(
-                self.wim["wim_url"], service_uuid)
-            response = requests.delete(servicepoint, auth=self.auth)
-            if response.status_code != requests.codes.no_content:
-                raise WimConnectorError("Error in the request", http_code=response.status_code)
-        except requests.exceptions.ConnectionError:
-            raise WimConnectorError("Request Timeout", http_code=408)
-
-    def edit_connectivity_service(self, service_uuid, conn_info=None,
-                                  connection_points=None, **kwargs):
-        """Change an existing connectivity service, see
-        ``create_connectivity_service``"""
-
-        # sites = {"sites": {}}
-        # site_list = []
-        vpn_service = {}
-        vpn_service["svc-topo"] = "any-to-any"
-        counter = 0
-        for connection_point in connection_points:
-            site_network_access = {}
-            connection_point_wan_info = self.search_mapp(connection_point)
-            params_site = {}
-            params_site["site-id"] = connection_point_wan_info["wan_service_mapping_info"]["site-id"]
-            params_site["site-vpn-flavor"] = "site-vpn-flavor-single"
-            device_site = {}
-            device_site["device-id"] = connection_point_wan_info["device-id"]
-            params_site["devices"] = device_site
-            # network_access = {}
-            connection = {}
-            if connection_point["service_endpoint_encapsulation_type"] != "none":
-                if connection_point["service_endpoint_encapsulation_type"] == "dot1q":
-                    """ The connection is a VLAN """
-                    connection["encapsulation-type"] = "dot1q-vlan-tagged"
-                    tagged = {}
-                    tagged_interf = {}
-                    service_endpoint_encapsulation_info = connection_point["service_endpoint_encapsulation_info"]
-                    if service_endpoint_encapsulation_info["vlan"] is None:
-                        raise WimConnectorError("VLAN must be provided")
-                    tagged_interf["cvlan-id"] = service_endpoint_encapsulation_info["vlan"]
-                    tagged["dot1q-vlan-tagged"] = tagged_interf
-                    connection["tagged-interface"] = tagged
-                else:
-                    raise NotImplementedError("Encapsulation type not implemented")
-            site_network_access["connection"] = connection
-            vpn_attach = {}
-            vpn_attach["vpn-id"] = service_uuid
-            vpn_attach["site-role"] = vpn_service["svc-topo"]+"-role"
-            site_network_access["vpn-attachment"] = vpn_attach
-            uuid_sna = conn_info[counter]["site-network-access-id"]
-            site_network_access["network-access-id"] = uuid_sna
-            site_network_access["bearer"] = connection_point_wan_info["wan_service_mapping_info"]["bearer"]
-            site_network_accesses = {}
-            site_network_access_list = []
-            site_network_access_list.append(site_network_access)
-            site_network_accesses["ietf-l2vpn-svc:site-network-access"] = site_network_access_list
-            try:
-                endpoint_site_network_access_edit = \
-                    "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/sites/site={}/site-network-accesses/".format(
-                        self.wim["wim_url"], connection_point_wan_info["wan_service_mapping_info"]["site-id"])
-                response_endpoint_site_network_access_creation = requests.put(endpoint_site_network_access_edit,
-                                                                              headers=self.headers,
-                                                                              json=site_network_accesses,
-                                                                              auth=self.auth)
-                if response_endpoint_site_network_access_creation.status_code == 400:
-                    raise WimConnectorError("Service does not exist",
-                                            http_code=response_endpoint_site_network_access_creation.status_code)
-                elif response_endpoint_site_network_access_creation.status_code != 201 and \
-                        response_endpoint_site_network_access_creation.status_code != 204:
-                    raise WimConnectorError("Request no accepted",
-                                            http_code=response_endpoint_site_network_access_creation.status_code)
-            except requests.exceptions.ConnectionError:
-                raise WimConnectorError("Request Timeout", http_code=408)
-            counter += 1
-        return None
-
-    def clear_all_connectivity_services(self):
-        """Delete all WAN Links corresponding to a WIM"""
-        try:
-            self.logger.info("Sending clear all connectivity services")
-            servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(self.wim["wim_url"])
-            response = requests.delete(servicepoint, auth=self.auth)
-            if response.status_code != requests.codes.no_content:
-                raise WimConnectorError("Unable to clear all connectivity services", http_code=response.status_code)
-        except requests.exceptions.ConnectionError:
-            raise WimConnectorError("Request Timeout", http_code=408)
-
-    def get_all_active_connectivity_services(self):
-        """Provide information about all active connections provisioned by a
-        WIM
-        """
-        try:
-            self.logger.info("Sending get all connectivity services")
-            servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(self.wim["wim_url"])
-            response = requests.get(servicepoint, auth=self.auth)
-            if response.status_code != requests.codes.ok:
-                raise WimConnectorError("Unable to get all connectivity services", http_code=response.status_code)
-            return response
-        except requests.exceptions.ConnectionError:
-            raise WimConnectorError("Request Timeout", http_code=408)
diff --git a/osm_ro/wim/wimconn_odl.py b/osm_ro/wim/wimconn_odl.py
deleted file mode 100644 (file)
index 2371046..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-# -*- coding: utf-8 -*-
-##
-# Copyright 2018 University of Bristol - High Performance Networks Research
-# Group
-# All Rights Reserved.
-#
-# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
-# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: <highperformance-networks@bristol.ac.uk>
-#
-# Neither the name of the University of Bristol nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# This work has been performed in the context of DCMS UK 5G Testbeds
-# & Trials Programme and in the framework of the Metro-Haul project -
-# funded by the European Commission under Grant number 761727 through the
-# Horizon 2020 and 5G-PPP programmes.
-##
-from .wimconn import WimConnector
-
-
-# TODO: Basically create this file
-
-class OdlConnector(WimConnector):
-    def get_connectivity_service_status(self, link_uuid):
-        raise NotImplementedError
-
-    def create_connectivity_service(self, *args, **kwargs):
-        raise NotImplementedError
-
-    def delete_connectivity_service(self, link_uuid):
-        raise NotImplementedError
diff --git a/requirements.txt b/requirements.txt
deleted file mode 100644 (file)
index bda5b8a..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-PyYAML
-bottle
-MySQL-python
-jsonschema
-paramiko
-argcomplete
-requests
-logutils
-python-openstackclient
-python-novaclient
-python-keystoneclient
-python-glanceclient
-python-neutronclient
-networking-l2gw
-python-cinderclient
-pyvcloud==19.1.1
-pyvmomi
-progressbar
-prettytable
-boto
-genisoimage
-untangle
-pyone
-oca
-azure
diff --git a/scripts/RO-of b/scripts/RO-of
deleted file mode 100755 (executable)
index 1e5c917..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/bin/bash
-
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-##
-
-# author: Alfonso Tierno
-
-# to get docker id that is running:
-# $ docker_ro=`docker service ps osm_ro -f desired-state=running --format "{{.Name}}.{{.ID}}" --no-trunc`
-# exec with:
-# $ docker exec -ti $docker_ro RO-of
-
-function get_from_db()
-{
-    echo  "select $1 from $2 where name='$3' or uuid='$3';" | mysql -h"$RO_DB_HOST" -u"$RO_DB_OVIM_USER" -p"$RO_DB_OVIM_PASSWORD" "$RO_DB_OVIM_NAME"  2>/dev/null | tail -n1
-}
-
-[ -z "$RO_DB_OVIM_HOST" ] && export RO_DB_OVIM_HOST="$RO_DB_HOST"
-
-if [ -z "$1" ] ; then
-    echo "usage '$0 <sdn_controller> command'"
-    echo
-    echo "available sdn_controllers are:"
-    echo  "select uuid, name, type, ip, dpid, status from ofcs;" | mysql -h"$RO_DB_HOST" -u"$RO_DB_OVIM_USER" -p"$RO_DB_OVIM_PASSWORD" "$RO_DB_OVIM_NAME"  2>/dev/null
-    exit
-fi
-
-
-export OF_CONTROLLER_DPID=`get_from_db dpid ofcs $1`
-[ -z "$OF_CONTROLLER_DPID" ] && echo "Cannot find sdn_controller '$1' at database" >&2 && exit 1
-
-export OF_CONTROLLER_IP=`get_from_db ip ofcs $1`
-export OF_CONTROLLER_PORT=`get_from_db port ofcs $1`
-export OF_CONTROLLER_USER=`get_from_db user ofcs $1`
-export OF_CONTROLLER_PASSWORD=`get_from_db password ofcs $1`
-export OF_CONTROLLER_TYPE=`get_from_db type ofcs $1`
-
-shift
-openflow-lib "$@"
-
-
-
diff --git a/scripts/RO-start.sh b/scripts/RO-start.sh
deleted file mode 100755 (executable)
index 47547cd..0000000
+++ /dev/null
@@ -1,144 +0,0 @@
-#!/bin/bash
-
-# This script is intended for launching RO from a docker container.
-# It waits for mysql server ready, normally running on a separate container, ...
-# then it checks if database is present and creates it if needed.
-# Finally it launches RO server.
-
-[ -z "$RO_DB_OVIM_HOST" ] && export RO_DB_OVIM_HOST="$RO_DB_HOST"
-[ -z "$RO_DB_OVIM_ROOT_PASSWORD" ] && export RO_DB_OVIM_ROOT_PASSWORD="$RO_DB_ROOT_PASSWORD"
-
-function is_db_created() {
-    db_host=$1
-    db_port=$2
-    db_user=$3
-    db_pswd=$4
-    db_name=$5
-    db_version=$6  # minimun database version
-
-    if mysqlshow -h"$db_host" -P"$db_port" -u"$db_user" -p"$db_pswd" | grep -v Wildcard | grep -q -e "$db_name" ; then
-        if echo "SELECT comments FROM schema_version WHERE version_int=0;" |
-                mysql -h"$db_host" -P"$db_port" -u"$db_user" -p"$db_pswd" "$db_name" |
-                grep -q -e "init" ; then
-            echo " DB $db_name exists BUT failed in previous init" >&2
-            return 1
-        elif echo "SELECT * FROM schema_version WHERE version_int=$db_version;" |
-                mysql -h"$db_host" -P"$db_port" -u"$db_user" -p"$db_pswd" "$db_name" |
-                grep -q -e "$db_version" ; then
-            echo " DB $db_name exists and inited" >&2
-            return 0
-        else
-            echo " DB $db_name exists BUT not inited" >&2
-            return 1
-        fi
-    fi
-    echo " DB $db_name does not exist" >&2
-    return 1
-}
-
-function configure(){
-    #Database parameters
-    #db_host:   localhost
-    #db_user:   mano
-    #db_passwd: manopw
-    #db_name:   mano_db
-    # Database ovim parameters
-    #db_ovim_host:   localhost          # by default localhost
-    #db_ovim_user:   mano               # DB user
-    #db_ovim_passwd: manopw             # DB password
-    #db_ovim_name:   mano_vim_db        # Name of the OVIM MANO DB
-
-
-    sed -i "s/^db_host:.*/db_host: $RO_DB_HOST/" /etc/osm/openmanod.cfg || return 1
-    sed -i "s/^db_user:.*/db_user: $RO_DB_USER/" /etc/osm/openmanod.cfg || return 1
-    sed -i "s/^db_passwd:.*/db_passwd: $RO_DB_PASSWORD/" /etc/osm/openmanod.cfg || return 1
-    sed -i "s/^db_name:.*/db_name: $RO_DB_NAME/" /etc/osm/openmanod.cfg || return 1
-    sed -i "s/^db_ovim_host:.*/db_ovim_host: $RO_DB_OVIM_HOST/" /etc/osm/openmanod.cfg || return 1
-    sed -i "s/^db_ovim_user:.*/db_ovim_user: $RO_DB_OVIM_USER/" /etc/osm/openmanod.cfg || return 1
-    sed -i "s/^db_ovim_passwd:.*/db_ovim_passwd: $RO_DB_OVIM_PASSWORD/" /etc/osm/openmanod.cfg || return 1
-    sed -i "s/^db_ovim_name:.*/db_ovim_name: $RO_DB_OVIM_NAME/" /etc/osm/openmanod.cfg || return 1
-    return 0
-}
-
-max_attempts=120
-function wait_db(){
-    db_host=$1
-    db_port=$2
-    attempt=0
-    echo "Wait until $max_attempts seconds for MySQL mano Server ${db_host}:${db_port} "
-    while ! mysqladmin ping -h"$db_host" -P"$db_port" --silent; do
-        #wait 120 sec
-        if [ $attempt -ge $max_attempts ]; then
-            echo
-            echo "Cannot connect to database ${db_host}:${db_port} during $max_attempts sec" >&2
-            return 1
-        fi
-        attempt=$[$attempt+1]
-        echo -n "."
-        sleep 1
-    done
-    return 0
-}
-
-
-echo "1/4 Apply config"
-# this is not needed anymore because envioron overwrites config file
-# configure || exit 1
-
-
-echo "2/4 Wait for db up"
-wait_db "$RO_DB_HOST" "$RO_DB_PORT" || exit 1
-[ "$RO_DB_OVIM_HOST" = "$RO_DB_HOST" ] ||  wait_db "$RO_DB_OVIM_HOST" "$RO_DB_OVIM_PORT" || exit 1
-
-
-echo "3/4 Init database"
-RO_PATH=`python -c 'import osm_ro; print(osm_ro.__path__[0])'`
-echo "RO_PATH: $RO_PATH"
-if ! is_db_created "$RO_DB_HOST" "$RO_DB_PORT" "$RO_DB_USER" "$RO_DB_PASSWORD" "$RO_DB_NAME" "27"
-then
-    if [ -n "$RO_DB_ROOT_PASSWORD" ] ; then
-        mysqladmin -h"$RO_DB_HOST" -uroot -p"$RO_DB_ROOT_PASSWORD" create "$RO_DB_NAME"
-        echo "CREATE USER '${RO_DB_USER}'@'%' IDENTIFIED BY '${RO_DB_PASSWORD}';" |
-            mysql -h"$RO_DB_HOST" -uroot -p"$RO_DB_ROOT_PASSWORD" || echo "user ${RO_DB_USER} already created?"
-        echo "GRANT ALL PRIVILEGES ON ${RO_DB_NAME}.* TO '${RO_DB_USER}'@'%';" |
-            mysql -h"$RO_DB_HOST" -uroot -p"$RO_DB_ROOT_PASSWORD"  || echo "user ${RO_DB_USER} already granted?"
-    fi
-    ${RO_PATH}/database_utils/init_mano_db.sh  -u "$RO_DB_USER" -p "$RO_DB_PASSWORD" -h "$RO_DB_HOST" \
-        -P "${RO_DB_PORT}" -d "${RO_DB_NAME}" || exit 1
-else
-    echo "  migrate database version"
-    ${RO_PATH}/database_utils/migrate_mano_db.sh -u "$RO_DB_USER" -p "$RO_DB_PASSWORD" -h "$RO_DB_HOST" \
-        -P "$RO_DB_PORT" -d "$RO_DB_NAME" -b /var/log/osm
-fi
-
-OVIM_PATH=`python -c 'import lib_osm_openvim; print(lib_osm_openvim.__path__[0])'`
-echo "OVIM_PATH: $OVIM_PATH"
-if ! is_db_created "$RO_DB_OVIM_HOST" "$RO_DB_OVIM_PORT" "$RO_DB_OVIM_USER" "$RO_DB_OVIM_PASSWORD" "$RO_DB_OVIM_NAME" \
-    "22"
-then
-    if [ -n "$RO_DB_OVIM_ROOT_PASSWORD" ] ; then
-        mysqladmin -h"$RO_DB_OVIM_HOST" -uroot -p"$RO_DB_OVIM_ROOT_PASSWORD" create "$RO_DB_OVIM_NAME"
-        echo "CREATE USER '${RO_DB_OVIM_USER}'@'%' IDENTIFIED BY '${RO_DB_OVIM_PASSWORD}';" |
-            mysql -h"$RO_DB_OVIM_HOST" -uroot -p"$RO_DB_OVIM_ROOT_PASSWORD" ||
-            echo "user ${RO_DB_OVIM_USER} already created?"
-        echo "GRANT ALL PRIVILEGES ON ${RO_DB_OVIM_NAME}.* TO '${RO_DB_OVIM_USER}'@'%';" |
-            mysql -h"$RO_DB_OVIM_HOST" -uroot -p"$RO_DB_OVIM_ROOT_PASSWORD"  ||
-            echo "user ${RO_DB_OVIM_USER} already granted?"
-    fi
-    ${OVIM_PATH}/database_utils/init_vim_db.sh  -u "$RO_DB_OVIM_USER" -p "$RO_DB_OVIM_PASSWORD" -h "$RO_DB_OVIM_HOST" \
-        -P "${RO_DB_OVIM_PORT}" -d "${RO_DB_OVIM_NAME}" || exit 1
-else
-    echo "  migrate database version"
-    ${OVIM_PATH}/database_utils/migrate_vim_db.sh -u "$RO_DB_OVIM_USER" -p "$RO_DB_OVIM_PASSWORD" -h "$RO_DB_OVIM_HOST"\
-        -P "$RO_DB_OVIM_PORT" -d "$RO_DB_OVIM_NAME" -b /var/log/osm
-fi
-
-
-echo "4/4 Try to start"
-# look for openmanod.cfg
-RO_CONFIG_FILE="/etc/osm/openmanod.cfg"
-[ -f "$RO_CONFIG_FILE" ] || RO_CONFIG_FILE=$(python -c 'import osm_ro; print(osm_ro.__path__[0])')/openmanod.cfg
-[ -f "$RO_CONFIG_FILE" ] || ! echo "configuration file 'openmanod.cfg' not found" || exit 1
-
-openmanod -c "$RO_CONFIG_FILE"  --create-tenant=osm  # --log-file=/var/log/osm/openmano.log
-
diff --git a/scripts/get-options.sh b/scripts/get-options.sh
deleted file mode 100644 (file)
index 8b2968e..0000000
+++ /dev/null
@@ -1,175 +0,0 @@
-#!/bin/bash
-
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-
-#Utility for getting options, must be call with source
-#for every <option> it sets a variable 'option_<option>="-"' 
-#if the option appears more than once, it concatenates a "-"
-#if the option contains an argument: 'option_<option>="argument"'
-#if the long option name contains "-" they are converted to "_"
-#params that are not options are stored in 'params'
-#the options to look for is received in the first argument, 
-#a blank separator list with short and long options without the leading - or --
-#options to be stored in the same variable must appear in the same word separated by ':'
-#insert a trailing = if the option requires an argument
-#insert a trailing ? if the option may have an argument NOT IMPLEMENTED
-#option -- means get the rest of argument returned as 'option__=$*'
-
-#example: to allow options -h --help -j -k(with argument) --my-long-option(with argument)
-# and other parameters after -- provide
-#     "help:h j k= my-long-option="
-#parsing "-h -karg pepe --my-long-option=otherar -- -s" will set variables
-#       option_help="-"
-#       option_k="arg"
-#       option_my_long_option="otherarg"
-#       params=" pepe"
-#       option__="-s"
-
-
-#detect if is called with a source to use the 'exit'/'return' command for exiting
-[[ ${BASH_SOURCE[0]} != $0 ]] && ___exit="return" || ___exit="exit"
-
-options="$1"
-shift
-
-get_argument=""
-#reset variables
-params=""
-for option_group in $options
-do
-    _name=${option_group%%:*}
-    _name=${_name%=}
-    _name=${_name//-/_}
-    eval option_${_name}='""'
-done
-
-while [[ $# -gt 0 ]]
-do
-    argument="$1"
-    shift
-    if [[ -n $get_argument ]]
-    then
-        [[ ${argument:0:1} == "-" ]] && echo "option '-$option' requires an argument"  >&2 && $___exit 1
-        eval ${get_argument}='"$argument"'
-        #echo option $get_argument with argument
-        get_argument=""
-        continue
-    fi
-
-
-    #short options
-    if [[ ${argument:0:1} == "-" ]] && [[ ${argument:1:1} != "-" ]] && [[ ${#argument} -ge 2 ]]
-    then
-        index=0
-        while index=$((index+1)) && [[ $index -lt ${#argument} ]]
-        do
-            option=${argument:$index:1}
-            bad_option=y
-            for option_group in $options
-            do
-                _name=""
-                for o in $(echo $option_group | tr ":=" " ")
-                do
-                    [[ -z "$_name" ]] && _name=${o//-/_}
-                    #echo option $option versus $o
-                    if [[ "$option" == "${o}" ]]
-                    then
-                        eval option_${_name}='${option_'${_name}'}-'
-                        bad_option=n
-                        if [[ ${option_group:${#option_group}-1} != "=" ]]
-                        then
-                            continue
-                        fi 
-                        if [[ ${#argument} -gt $((index+1)) ]]
-                        then
-                            eval option_${_name}='"${argument:$((index+1))}"'
-                            index=${#argument}
-                        else
-                            get_argument=option_${_name}
-                            #echo next should be argument $argument
-                        fi
-    
-                        break
-                    fi
-                done
-            done
-            [[ $bad_option == y ]] && echo "invalid argument '-$option'?  Type -h for help" >&2 && $___exit 1
-        done
-    elif [[ ${argument:0:2} == "--" ]] && [[ ${#argument} -ge 3 ]]
-    then 
-        option=${argument:2}
-        option_argument=${option#*=}
-        option_name=${option%%=*}
-        [[ "$option_name" == "$option" ]] && option_argument=""
-        bad_option=y
-        for option_group in $options
-        do
-            _name=""
-            for o in $(echo $option_group | tr ":=" " ")
-            do
-                [[ -z "$_name" ]] && _name=${o//-/_}
-                #echo option $option versus $o
-                if [[ "$option_name" == "${o}" ]]
-                then
-                    bad_option=n
-                    if [[ ${option_group:${#option_group}-1} != "=" ]] 
-                    then #not an argument
-                        [[ -n "${option_argument}" ]] && echo "option '--${option%%=*}' do not accept an argument " >&2 && $___exit 1
-                        eval option_${_name}='"${option_'${_name}'}-"'
-                    elif [[ -n "${option_argument}" ]]
-                    then
-                        eval option_${_name}='"${option_argument}"'
-                    else
-                        get_argument=option_${_name}
-                        #echo next should be argument $argument
-                    fi
-                    break
-                fi
-            done
-        done
-        [[ $bad_option == y ]] && echo "invalid argument '-$option'?  Type -h for help" >&2 && $___exit 1
-    elif [[ ${argument:0:2} == "--" ]]
-    then
-        option__="$*"
-        bad_option=y
-        for o in $options
-        do
-            if [[ "$o" == "--" ]]
-            then
-                bad_option=n
-                option__=" $*"
-                break
-            fi
-        done
-        [[ $bad_option == y ]] && echo "invalid argument '--'?  Type -h for help" >&2 && $___exit 1
-        break
-    else
-        params="$params ${argument}"
-    fi
-
-done
-
-[[ -n "$get_argument" ]] && echo "option '-$option' requires an argument"  >&2 && $___exit 1
-$___exit 0
-#echo params $params
-
diff --git a/scripts/install-lib-osm-openvim.sh b/scripts/install-lib-osm-openvim.sh
deleted file mode 100755 (executable)
index c1374d5..0000000
+++ /dev/null
@@ -1,97 +0,0 @@
-#!/bin/bash
-
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-##
-
-# author: Alfonso Tierno
-
-# It uses following env, if not provided filling by default
-[ -z "$GIT_OVIM_URL" ] && GIT_OVIM_URL=https://osm.etsi.org/gerrit/osm/openvim.git
-[ -z "$DEVELOP" ] && DEVELOP=""
-# folder where RO is installed
-[ -z "$BASEFOLDER" ] && HERE=$(dirname $(readlink -f ${BASH_SOURCE[0]})) && BASEFOLDER=$(dirname $HERE)
-[ -z "$SUDO_USER" ] && SUDO_USER="$USER"
-[ -z "$NO_PACKAGES" ] && NO_PACKAGES=""
-[ -z "$_DISTRO" ] && _DISTRO="Ubuntu"
-
-
-function usage(){
-    echo -e "usage: sudo -E $0 [OPTIONS]"
-    echo -e "Install last stable source code of lib-osm-openvim and the needed packages"
-    echo -e "  OPTIONS"
-    echo -e "     -h --help:  show this help"
-    echo -e "     -b REFSPEC: install from source code using a specific branch (master, v2.0, ...) or tag"
-    echo -e "                    -b master          (main branch)"
-    echo -e "                    -b v2.0            (v2.0 branch)"
-    echo -e "                    -b tags/v1.1.0     (a specific tag)"
-    echo -e "                    ..."
-    echo -e "     --develop:  install last master version for developers"
-    echo -e "     --no-install-packages: use this option to skip updating and installing the requires packages. This" \
-            "avoid wasting time if you are sure requires packages are present e.g. because of a previous installation"
-}
-while getopts ":b:h-:" o; do
-    case "${o}" in
-        b)
-            export COMMIT_ID=${OPTARG}
-            ;;
-        h)
-            usage && exit 0
-            ;;
-        -)
-            [ "${OPTARG}" == "help" ] && usage && exit 0
-            [ "${OPTARG}" == "develop" ] && export DEVELOP="y" && continue
-            [ "${OPTARG}" == "quiet" ] && export QUIET_MODE=yes && export DEBIAN_FRONTEND=noninteractive && continue
-            [ "${OPTARG}" == "no-install-packages" ] && export NO_PACKAGES=yes && continue
-            echo -e "Invalid option: '--$OPTARG'\nTry $0 --help for more information" >&2
-            exit 1
-            ;;
-        \?)
-            echo -e "Invalid option: '-$OPTARG'\nTry $0 --help for more information" >&2
-            exit 1
-            ;;
-        :)
-            echo -e "Option '-$OPTARG' requires an argument\nTry $0 --help for more information" >&2
-            exit 1
-            ;;
-        *)
-            usage >&2
-            exit 1
-            ;;
-    esac
-done
-
-su $SUDO_USER -c "git -C '${BASEFOLDER}' clone ${GIT_OVIM_URL} lib-openvim" ||
-    ! echo "Error cannot clone from '${GIT_OVIM_URL}'" >&2 || exit 1
-if [[ -n $COMMIT_ID ]] ; then
-    echo -e "Installing lib-osm-openvim from refspec: $COMMIT_ID"
-    su $SUDO_USER -c "git -C '${BASEFOLDER}/lib-openvim' checkout $COMMIT_ID" ||
-        ! echo "Error cannot checkout '$COMMIT_ID' from '${GIT_OVIM_URL}'" >&2 || exit 1
-elif [[ -z $DEVELOP ]]; then
-    LATEST_STABLE_TAG=`git -C "${BASEFOLDER}/lib-openvim" tag -l "v[0-9]*" | sort -V | tail -n1`
-    echo -e "Installing lib-osm-openvim from refspec: tags/${LATEST_STABLE_TAG}"
-    su $SUDO_USER -c "git -C '${BASEFOLDER}/lib-openvim' checkout tags/${LATEST_STABLE_TAG}" ||
-        ! echo "Error cannot checkout 'tags/${LATEST_STABLE_TAG}' from '${GIT_OVIM_URL}'" >&2 || exit 1
-else
-    echo -e "Installing lib-osm-openvim from refspec: master"
-fi
-
-make -C "${BASEFOLDER}/lib-openvim" prepare_lite
-export LANG="en_US.UTF-8"
-pip2 install -e  "${BASEFOLDER}/lib-openvim/build" || ! echo "ERROR installing lib-osm-openvim library!!!" >&2  ||
-    exit 1
diff --git a/scripts/install-openmano-service.sh b/scripts/install-openmano-service.sh
deleted file mode 100755 (executable)
index c19b345..0000000
+++ /dev/null
@@ -1,168 +0,0 @@
-#!/bin/bash
-
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-#ONLY TESTED for Ubuntu 16.04
-#it configures openmano to run as a service
-
-function usage(){
-    echo -e "usage: sudo -E $0 [OPTIONS]"
-    echo -e "Configures openmano to run as a service at /opt"
-    echo -e "  OPTIONS"
-    echo -e "     -u USER_OWNER  user owner of the service, 'root' by default"
-    echo -e "     -f PATH  path where openmano source is located. If missing it is downloaded from git"
-    echo -e "     -d --delete:  if -f is provided, delete this path after copying to /opt"
-    echo -e "     -h:  show this help"
-    echo -e "     --uninstall: remove created service and files"
-}
-
-function uninstall(){
-    echo "systemctl disable openmano.service " &&  systemctl disable openmano.service 2>/dev/null ||
-        echo "  Already done"
-    echo "systemctl disable osm-ro.service " &&  systemctl disable osm-ro.service 2>/dev/null ||
-        echo "  Already done"
-    echo "service openmano stop " && service openmano stop 2>/dev/null || echo "  Already done"
-    echo "service osm-ro stop " && service osm-ro stop 2>/dev/null || echo "  Already done"
-    for file in /opt/openmano /etc/default/openmanod.cfg /etc/osm/openmanod.cfg /var/log/openmano /var/log/osm/openmano* \
-        /etc/systemd/system/openmano.service /etc/systemd/system/osm-ro.service /usr/bin/openmano /usr/sbin/service-openmano \
-        /usr/bin/openmano-report
-    do
-        echo rm $file
-        rm -rf $file || ! echo "Can not delete '$file'. Needed root privileges?" >&2 || exit 1
-    done
-    echo "Done"
-}
-
-GIT_URL=https://osm.etsi.org/gerrit/osm/RO.git
-USER_OWNER="root"
-QUIET_MODE=""
-FILE=""
-DELETE=""
-while getopts ":u:f:hdq-:" o; do
-    case "${o}" in
-        u)
-            export USER_OWNER="$OPTARG"
-            ;;
-        f)
-            export FILE="$OPTARG"
-            ;;
-        q)
-            export QUIET_MODE=yes
-            ;;
-        h)
-            usage && exit 0
-            ;;
-        d)
-            DELETE=y
-            ;;
-        -)
-            [ "${OPTARG}" == "help" ] && usage && exit 0
-            [ "${OPTARG}" == "uninstall" ] && uninstall && exit 0
-            [ "${OPTARG}" == "delete" ] && DELETE=y && continue
-            echo -e "Invalid option: '--$OPTARG'\nTry $0 --help for more information" >&2
-            exit 1
-            ;; 
-        \?)
-            echo -e "Invalid option: '-$OPTARG'\nTry $0 --help for more information" >&2
-            exit 1
-            ;;
-        :)
-            echo -e "Option '-$OPTARG' requires an argument\nTry $0 --help for more information" >&2
-            exit 1
-            ;;
-        *)
-            usage >&2
-            exit -1
-            ;;
-    esac
-done
-BAD_PATH_ERROR="Path '$FILE' does not contain a valid openmano distribution"
-
-#check root privileges
-[ "$USER" != "root" ] && echo "Needed root privileges" >&2 && exit 1
-
-#Discover Linux distribution
-#try redhat type
-if [[ -f /etc/redhat-release ]]
-then 
-    _DISTRO=$(cat /etc/redhat-release 2>/dev/null | cut  -d" " -f1)
-else 
-    #if not assuming ubuntu type
-    _DISTRO=$(lsb_release -is  2>/dev/null)
-fi            
-if [[ "$_DISTRO" == "Ubuntu" ]]
-then
-    _RELEASE=$(lsb_release -rs)
-    if [[ ${_RELEASE%%.*} != 16 ]] 
-    then 
-        echo "Only tested in Ubuntu Server 16.04" >&2 && exit 1
-    fi
-else
-    echo "Only tested in Ubuntu Server 16.04" >&2 && exit 1
-fi
-
-
-if [[ -z "$FILE" ]]
-then
-    FILE=__temp__${RANDOM}
-    git clone $GIT_URL $FILE || ! echo "Cannot get openmano source code from $GIT_URL" >&2 || exit 1
-    DELETE=y
-else
-    [[ -d  "$FILE" ]] || ! echo $BAD_PATH_ERROR >&2 || exit 1
-fi
-
-#make idempotent
-uninstall
-#copy files
-cp -r "$FILE" /opt/openmano         || ! echo $BAD_PATH_ERROR >&2 || exit 1
-mkdir -p /etc/osm  || echo "warning cannot create config folder '/etc/osm'"
-cp /opt/openmano/osm_ro/openmanod.cfg /etc/osm/openmanod.cfg  ||
-    echo "warning cannot create file '/etc/osm/openmanod.cfg'"
-mkdir -p /var/log/osm  || echo "warning cannot create log folder '/var/log/osm'"
-#makes links
-ln -s -v /opt/openmano/openmano /usr/bin/openmano
-ln -s -v /opt/openmano/scripts/service-openmano /usr/sbin/service-openmano
-ln -s -v /opt/openmano/scripts/openmano-report /usr/bin/openmano-report
-
-chown -R $SUDO_USER /opt/openmano
-
-mkdir -p /etc/systemd/system/
-cat  > /etc/systemd/system/osm-ro.service  << EOF 
-[Unit]
-Description=openmano server
-
-[Service]
-User=${USER_OWNER}
-ExecStart=/opt/openmano/openmanod -c /etc/osm/openmanod.cfg --log-file=/var/log/osm/openmano.log
-Restart=always
-
-[Install]
-WantedBy=multi-user.target
-EOF
-
-[[ -n $DELETE ]] && rm -rf "${FILE}"
-
-service osm-ro start
-systemctl enable osm-ro.service
-
-echo Done
-exit
diff --git a/scripts/install-openmano.sh b/scripts/install-openmano.sh
deleted file mode 100755 (executable)
index 98d0905..0000000
+++ /dev/null
@@ -1,446 +0,0 @@
-#!/bin/bash
-
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-#ONLY TESTED in Ubuntu 16.04   partially tested in Ubuntu 14.10 14.04 16.04, CentOS7 and RHEL7
-#Get needed packages, source code and configure to run openmano
-#Ask for database user and password if not provided
-
-function usage(){
-    echo -e "usage: sudo -E $0 [OPTIONS]"
-    echo -e "Install last stable source code in ./openmano and the needed packages"
-    echo -e "On a Ubuntu 16.04 it configures openmano as a service"
-    echo -e "  OPTIONS"
-    echo -e "     -u USER:    database admin user. 'root' by default. Prompts if needed"
-    echo -e "     -p PASS:    database admin password to be used or installed. Prompts if needed"
-    echo -e "     -q --quiet: install in unattended mode"
-    echo -e "     -h --help:  show this help"
-    echo -e "     -b REFSPEC: install from source code using a specific branch (master, v2.0, ...) or tag"
-    echo -e "                    -b master          (main RO branch)"
-    echo -e "                    -b v2.0            (v2.0 branch)"
-    echo -e "                    -b tags/v1.1.0     (a specific tag)"
-    echo -e "                    ..."
-    echo -e "     --develop:  install last version for developers, and do not configure as a service"
-    echo -e "     --forcedb:  reinstall mano_db DB, deleting previous database if exists and creating a new one"
-    echo -e "     --updatedb: do not reinstall mano_db DB if it exists, just update database"
-    echo -e "     --force:    makes idenpotent, delete previous installations folders if needed. It assumes --updatedb if --forcedb option is not provided"
-    echo -e "     --noclone:  assumes that openmano was cloned previously and that this script is run from the local repo"
-    echo -e "     --no-install-packages: use this option to skip updating and installing the requires packages. This avoid wasting time if you are sure requires packages are present e.g. because of a previous installation"
-    echo -e "     --no-db: do not install mysql server"
-}
-
-function install_packages(){
-    [ -x /usr/bin/apt-get ] && apt-get install -y $*
-    [ -x /usr/bin/yum ]     && yum install     -y $*   
-    
-    #check properly installed
-    for PACKAGE in $*
-    do
-        PACKAGE_INSTALLED="no"
-        [ -x /usr/bin/apt-get ] && dpkg -l $PACKAGE            &>> /dev/null && PACKAGE_INSTALLED="yes"
-        [ -x /usr/bin/yum ]     && yum list installed $PACKAGE &>> /dev/null && PACKAGE_INSTALLED="yes" 
-        if [ "$PACKAGE_INSTALLED" = "no" ]
-        then
-            echo "failed to install package '$PACKAGE'. Revise network connectivity and try again" >&2
-            exit 1
-       fi
-    done
-}
-
-function ask_user(){
-    # ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
-    # Params: $1 text to ask;   $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
-    # Return: true(0) if user type 'yes'; false (1) if user type 'no'
-    read -e -p "$1" USER_CONFIRMATION
-    while true ; do
-        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
-        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
-        [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
-        [ "${USER_CONFIRMATION,,}" == "no" ]  || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
-        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
-    done
-}
-
-GIT_URL=https://osm.etsi.org/gerrit/osm/RO.git
-export GIT_OVIM_URL=https://osm.etsi.org/gerrit/osm/openvim.git
-export GIT_OSMIM_URL=https://osm.etsi.org/gerrit/osm/IM.git
-DBUSER="root"
-DBPASSWD=""
-DBPASSWD_PARAM=""
-QUIET_MODE=""
-DEVELOP=""
-DB_FORCE_UPDATE=""
-UPDATEDB=""
-FORCE=""
-NOCLONE=""
-NO_PACKAGES=""
-NO_DB=""
-COMMIT_ID=""
-
-while getopts ":u:p:b:hiq-:" o; do
-    case "${o}" in
-        u)
-            export DBUSER="$OPTARG"
-            ;;
-        p)
-            export DBPASSWD="$OPTARG"
-            export DBPASSWD_PARAM="-p$OPTARG"
-            ;;
-        b)
-            export COMMIT_ID=${OPTARG}
-            ;;
-        q)
-            export QUIET_MODE=yes
-            export DEBIAN_FRONTEND=noninteractive
-            ;;
-        h)
-            usage && exit 0
-            ;;
-        -)
-            [ "${OPTARG}" == "help" ] && usage && exit 0
-            [ "${OPTARG}" == "develop" ] && export DEVELOP="y" && continue
-            [ "${OPTARG}" == "forcedb" ] && DB_FORCE_UPDATE="${DB_FORCE_UPDATE}--forcedb" && continue
-            [ "${OPTARG}" == "updatedb" ] && DB_FORCE_UPDATE="${DB_FORCE_UPDATE}--updatedb" && continue
-            [ "${OPTARG}" == "force" ]   &&  FORCE="y" && continue
-            [ "${OPTARG}" == "noclone" ] && NOCLONE="y" && continue
-            [ "${OPTARG}" == "quiet" ] && export QUIET_MODE=yes && export DEBIAN_FRONTEND=noninteractive && continue
-            [ "${OPTARG}" == "no-install-packages" ] && export NO_PACKAGES=yes && continue
-            [ "${OPTARG}" == "no-db" ] && NO_DB="y" && continue
-            echo -e "Invalid option: '--$OPTARG'\nTry $0 --help for more information" >&2
-            exit 1
-            ;;
-        \?)
-            echo -e "Invalid option: '-$OPTARG'\nTry $0 --help for more information" >&2
-            exit 1
-            ;;
-        :)
-            echo -e "Option '-$OPTARG' requires an argument\nTry $0 --help for more information" >&2
-            exit 1
-            ;;
-        *)
-            usage >&2
-            exit 1
-            ;;
-    esac
-done
-
-if [ "$DB_FORCE_UPDATE" == "--forcedb--updatedb" ] || [ "$DB_FORCE_UPDATE" == "--updatedb--forcedb" ] ; then
-    echo "Error: options --forcedb and --updatedb are mutually exclusive" >&2
-    exit 1
-elif [ -n "$FORCE" ] && [ -z "$DB_FORCE_UPDATE" ] ; then
-    DB_FORCE_UPDATE="--updatedb"
-fi
-
-#check root privileges and non a root user behind
-[ "$USER" != "root" ] && echo "Needed root privileges" >&2 && exit 1
-if [[ -z "$SUDO_USER" ]] || [[ "$SUDO_USER" = "root" ]]
-then
-    [[ -z $QUIET_MODE ]] && ! ask_user "Install in the root user (y/N)? " n  && echo "Cancelled" && exit 1
-    export SUDO_USER=root
-fi
-
-# Discover Linux distribution
-# try redhat type
-[ -f /etc/redhat-release ] && _DISTRO=$(cat /etc/redhat-release 2>/dev/null | cut  -d" " -f1) 
-# if not assuming ubuntu type
-[ -f /etc/redhat-release ] || _DISTRO=$(lsb_release -is  2>/dev/null)            
-if [ "$_DISTRO" == "Ubuntu" ]
-then
-    _RELEASE=$(lsb_release -rs)
-    if [[ ${_RELEASE%%.*} != 14 ]] && [[ ${_RELEASE%%.*} != 16 ]]
-    then
-        [[ -z $QUIET_MODE ]] &&
-            ! ask_user "WARNING! Not tested Ubuntu version. Continue assuming a trusty (14.XX)' (y/N)? " n &&
-            echo "Cancelled" && exit 1
-        _RELEASE = 14
-    fi
-elif [ "$_DISTRO" == "CentOS" ]
-then
-    _RELEASE="7" 
-    if ! cat /etc/redhat-release | grep -q "7."
-    then
-        [[ -z $QUIET_MODE ]] &&
-            ! ask_user "WARNING! Not tested CentOS version. Continue assuming a '$_RELEASE' type (y/N)? " n &&
-            echo "Cancelled" && exit 1
-    fi
-elif [ "$_DISTRO" == "Red" ]
-then
-    _RELEASE="7" 
-    if ! cat /etc/redhat-release | grep -q "7."
-    then
-        [[ -z $QUIET_MODE ]] &&
-            ! ask_user "WARNING! Not tested Red Hat OS version. Continue assuming a '$_RELEASE' type (y/N)? " n &&
-            echo "Cancelled" && exit 1
-    fi
-else  #[ "$_DISTRO" != "Ubuntu" -a "$_DISTRO" != "CentOS" -a "$_DISTRO" != "Red" ] 
-    _DISTRO_DISCOVER=$_DISTRO
-    [ -x /usr/bin/apt-get ] && _DISTRO="Ubuntu" && _RELEASE="14"
-    [ -x /usr/bin/yum ]     && _DISTRO="CentOS" && _RELEASE="7"
-    [[ -z $QUIET_MODE ]] &&
-        ! ask_user "WARNING! Not tested Linux distribution '$_DISTRO_DISCOVER '. Continue assuming a '$_DISTRO $_RELEASE' type (y/N)? " n &&
-        echo "Cancelled" && exit 1
-fi
-
-export _DISTRO="$_DISTRO"
-#check if installed as a service
-INSTALL_AS_A_SERVICE=""
-[[ "$_DISTRO" == "Ubuntu" ]] &&  [[ ${_RELEASE%%.*} == 16 ]] && [[ -z $DEVELOP ]] && INSTALL_AS_A_SERVICE="y"
-
-# Next operations require knowing BASEFOLDER
-if [[ -z "$NOCLONE" ]]; then
-    if [[ -n "$INSTALL_AS_A_SERVICE" ]] ; then
-        export BASEFOLDER=__openmano__${RANDOM}
-    else
-        export BASEFOLDER="${PWD}/openmano"
-    fi
-    [[ -n "$FORCE" ]] && rm -rf $BASEFOLDER #make idempotent
-else
-    HERE=$(dirname $(readlink -f ${BASH_SOURCE[0]}))
-    export BASEFOLDER=$(dirname $HERE)
-fi
-
-if [[ -z "$NO_PACKAGES" ]]
-then
-    echo -e "\n"\
-        "#################################################################\n"\
-        "#####        UPDATE REPOSITORIES                            #####\n"\
-        "#################################################################"
-    [ "$_DISTRO" == "Ubuntu" ] && apt-get update -y &&
-        add-apt-repository -y cloud-archive:queens && apt-get update -y
-
-    [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && yum check-update -y
-    [ "$_DISTRO" == "CentOS" ] && yum install -y epel-release
-    [ "$_DISTRO" == "Red" ] && wget http://dl.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-5.noarch.rpm \
-        && rpm -ivh epel-release-7-5.noarch.rpm && yum install -y epel-release && rm -f epel-release-7-5.noarch.rpm
-    [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && yum repolist
-
-    echo -e "\n"\
-        "#################################################################\n"\
-        "#####        INSTALL REQUIRED PACKAGES                      #####\n"\
-        "#################################################################"
-    [ "$_DISTRO" == "Ubuntu" ] && install_packages "git make screen wget mysql-client"
-    [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && install_packages "git make screen wget mariadb-client"
-
-    echo -e "\n"\
-        "#################################################################\n"\
-        "#####        INSTALL PYTHON PACKAGES                        #####\n"\
-        "#################################################################"
-    [ "$_DISTRO" == "Ubuntu" ] && install_packages "python-yaml python-bottle python-mysqldb python-jsonschema "\
-        "python-paramiko python-argcomplete python-requests python-logutils libxml2-dev libxslt-dev python-dev "\
-        "python-pip python-crypto python-networkx"
-    [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && install_packages "PyYAML MySQL-python python-jsonschema "\
-        "python-paramiko python-argcomplete python-requests python-logutils libxslt-devel libxml2-devel python-devel "\
-        "python-pip python-crypto python-networkx"
-    # The only way to install python-bottle on Centos7 is with easy_install or pip
-    [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && easy_install -U bottle
-
-    # required for vmware connector TODO move that to separete opt in install script
-    pip2 install pip==9.0.3 || exit 1   #  --upgrade pip    install pip 10 that does not work
-    pip2 install pyvcloud==19.1.1 || exit 1
-    pip2 install progressbar || exit 1
-    pip2 install prettytable || exit 1
-    pip2 install pyvmomi || exit 1
-    [ "$_DISTRO" == "Ubuntu" ] && install_packages "genisoimage"
-    [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && install_packages "genisoimage"
-
-    # required for fog connector
-    pip2 install fog05rest || exit 1
-
-    # required for OpenNebula connector
-    pip2 install untangle || exit 1
-    pip2 install pyone || exit 1
-    pip2 install -e git+https://github.com/python-oca/python-oca#egg=oca || exit 1
-
-    # required for AWS connector
-    [ "$_DISTRO" == "Ubuntu" ] && install_packages "python-boto"
-    [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && install_packages "python-boto"  #TODO check if at Centos it exists with this name, or PIP should be used
-
-    # install openstack client needed for using openstack as a VIM
-    [ "$_DISTRO" == "Ubuntu" ] && install_packages "python-novaclient python-keystoneclient python-glanceclient "\
-                                                   "python-neutronclient python-cinderclient python-openstackclient "\
-                                                   "python-networking-l2gw"
-    [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && install_packages "python-devel" && easy_install \
-        python-novaclient python-keystoneclient python-glanceclient python-neutronclient python-cinderclient \
-        python-openstackclient python-networking-l2gw #TODO revise if gcc python-pip is needed
-
-    # required for Azure
-    pip2 install azure
-    
-fi  # [[ -z "$NO_PACKAGES" ]]
-
-if [[ -z $NOCLONE ]]; then
-    echo -e "\n"\
-        "#################################################################\n"\
-        "#####        DOWNLOAD SOURCE                                #####\n"\
-        "#################################################################"
-    if [[ -d "${BASEFOLDER}" ]] ; then
-        if [[ -n "$FORCE" ]] ; then
-            echo "deleting '${BASEFOLDER}' folder"
-            rm -rf "$BASEFOLDER" #make idempotent
-        elif [[ -z "$QUIET_MODE" ]] ; then
-            ! ask_user "folder '${BASEFOLDER}' exists, overwrite (y/N)? " n && echo "Cancelled!" && exit 1
-            rm -rf "$BASEFOLDER"
-        else
-            echo "'${BASEFOLDER}' folder exists. Use "--force" to overwrite" >&2 && exit 1
-        fi
-    fi
-    su $SUDO_USER -c "git clone ${GIT_URL} ${BASEFOLDER}" || ! echo "Error cannot clone from '$GIT_URL'" >&2 || exit 1
-    if [[ -n $COMMIT_ID ]] ; then
-        echo -e "Installing osm-RO from refspec: $COMMIT_ID"
-        su $SUDO_USER -c "git -C ${BASEFOLDER} checkout $COMMIT_ID" ||
-            ! echo "Error cannot checkout '$COMMIT_ID' from '$GIT_URL'" >&2 || exit 1
-    elif [[ -z $DEVELOP ]]; then
-        LATEST_STABLE_TAG=`git -C "${BASEFOLDER}" tag -l "v[0-9]*" | sort -V | tail -n1`
-        echo -e "Installing osm-RO from refspec: tags/${LATEST_STABLE_TAG}"
-        su $SUDO_USER -c "git -C ${BASEFOLDER} checkout tags/${LATEST_STABLE_TAG}" ||
-            ! echo "Error cannot checkout 'tags/${LATEST_STABLE_TAG}' from '$GIT_URL'" >&2 || exit 1
-    else
-        echo -e "Installing osm-RO from refspec: master"
-    fi
-    su $SUDO_USER -c "cp ${BASEFOLDER}/.gitignore-common ${BASEFOLDER}/.gitignore"
-fi
-
-echo -e "\n"\
-    "#################################################################\n"\
-    "#####        INSTALLING OSM-IM LIBRARY                      #####\n"\
-    "#################################################################"
-    ${BASEFOLDER}/scripts/install-osm-im.sh
-    OSM_IM_PATH=`python -c 'import osm_im; print osm_im.__path__[0]'` ||
-        ! echo "ERROR installing python-osm-im library!!!" >&2  || exit 1
-
-echo -e "\n"\
-    "#################################################################\n"\
-    "#####        INSTALLING OVIM LIBRARY                        #####\n"\
-    "#################################################################"
-    ${BASEFOLDER}/scripts/install-lib-osm-openvim.sh
-    OSMLIBOVIM_PATH=`python -c 'import lib_osm_openvim; print lib_osm_openvim.__path__[0]'` ||
-        ! echo "ERROR installing python-lib-osm-openvim library!!!" >&2  || exit 1
-
-if [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ]
-then
-    echo -e "\n"\
-        "#################################################################\n"\
-        "#####        CONFIGURE firewalld                            #####\n"\
-        "#################################################################"
-    if [[ -z $QUIET_MODE ]] || ask_user "Configure firewalld for openmanod port 9090 (Y/n)? " y
-    then
-        #Creates a service file for openmano
-        echo '<?xml version="1.0" encoding="utf-8"?>
-<service>
- <short>openmanod</short>
- <description>openmanod service</description>
- <port protocol="tcp" port="9090"/>
-</service>' > /etc/firewalld/services/openmanod.xml
-        #put proper permissions
-        pushd /etc/firewalld/services > /dev/null
-        restorecon openmanod.xml
-        chmod 640 openmanod.xml
-        popd > /dev/null
-        #Add the openmanod service to the default zone permanently and reload the firewall configuration
-        firewall-cmd --permanent --add-service=openmanod > /dev/null
-        firewall-cmd --reload > /dev/null
-        echo "done." 
-    else
-        echo "skipping."
-    fi
-fi
-
-echo -e "\n"\
-    "#################################################################\n"\
-    "#####        CONFIGURE OPENMANO CLIENT                      #####\n"\
-    "#################################################################"
-#creates a link at ~/bin if not configured as a service
-if [[ -z "$INSTALL_AS_A_SERVICE" ]]
-then
-    su $SUDO_USER -c 'mkdir -p ${HOME}/bin'
-    su $SUDO_USER -c 'rm -f ${HOME}/bin/openmano'
-    su $SUDO_USER -c 'rm -f ${HOME}/bin/openmano-report'
-    su $SUDO_USER -c 'rm -f ${HOME}/bin/service-openmano'
-    su $SUDO_USER -c "ln -s '${BASEFOLDER}/openmano' "'${HOME}/bin/openmano'
-    su $SUDO_USER -c "ln -s '${BASEFOLDER}/scripts/openmano-report.sh'   "'${HOME}/bin/openmano-report'
-    su $SUDO_USER -c "ln -s '${BASEFOLDER}/scripts/service-openmano'  "'${HOME}/bin/service-openmano'
-
-    #insert /home/<user>/bin in the PATH
-    #skiped because normally this is done authomatically when ~/bin exists
-    #if ! su $SUDO_USER -c 'echo $PATH' | grep -q "${HOME}/bin"
-    #then
-    #    echo "    inserting /home/$SUDO_USER/bin in the PATH at .bashrc"
-    #    su $SUDO_USER -c 'echo "PATH=\$PATH:\${HOME}/bin" >> ~/.bashrc'
-    #fi
-    
-    if [[ $SUDO_USER == root ]]
-    then
-        if ! echo $PATH | grep -q "${HOME}/bin"
-        then
-            echo "PATH=\$PATH:\${HOME}/bin" >> ${HOME}/.bashrc
-        fi
-    fi
-fi
-
-#configure arg-autocomplete for this user
-#in case of minimal instalation this package is not installed by default
-[[ "$_DISTRO" == "CentOS" || "$_DISTRO" == "Red" ]] && yum install -y bash-completion
-#su $SUDO_USER -c 'mkdir -p ~/.bash_completion.d'
-su $SUDO_USER -c 'activate-global-python-argcomplete --user'
-if ! su  $SUDO_USER -c 'grep -q bash_completion.d/python-argcomplete.sh ${HOME}/.bashrc'
-then
-    echo "    inserting .bash_completion.d/python-argcomplete.sh execution at .bashrc"
-    su $SUDO_USER -c 'echo ". ${HOME}/.bash_completion.d/python-argcomplete.sh" >> ~/.bashrc'
-fi
-
-if [ -z "$NO_DB" ]; then
-    echo -e "\n"\
-        "#################################################################\n"\
-        "#####               INSTALL DATABASE SERVER                 #####\n"\
-        "#################################################################"
-
-    if [ -n "$QUIET_MODE" ]; then
-        DB_QUIET='-q'
-    fi
-    ${BASEFOLDER}/database_utils/install-db-server.sh -U $DBUSER ${DBPASSWD_PARAM/p/P} $DB_QUIET $DB_FORCE_UPDATE || exit 1
-    echo -e "\n"\
-        "#################################################################\n"\
-        "#####        CREATE AND INIT MANO_VIM DATABASE              #####\n"\
-        "#################################################################"
-    # Install mano_vim_db after setup
-    ${OSMLIBOVIM_PATH}/database_utils/install-db-server.sh -U $DBUSER ${DBPASSWD_PARAM/p/P} -u mano -p manopw -d mano_vim_db --no-install-packages $DB_QUIET $DB_FORCE_UPDATE || exit 1
-fi   # [ -z "$NO_DB" ]
-
-if [[ -n "$INSTALL_AS_A_SERVICE"  ]]
-then
-    echo -e "\n"\
-        "#################################################################\n"\
-        "#####        CONFIGURE OPENMANO SERVICE                     #####\n"\
-        "#################################################################"
-
-    ${BASEFOLDER}/scripts/install-openmano-service.sh -f ${BASEFOLDER} `[[ -z "$NOCLONE" ]] && echo "-d"`
-    # rm -rf ${BASEFOLDER}
-    # alias service-openmano="service openmano"
-    # echo 'alias service-openmano="service openmano"' >> ${HOME}/.bashrc
-    echo
-    echo "Done!  installed at /opt/openmano"
-    echo " Manage server with 'sudo -E service osm-ro start|stop|status|...' "
-else
-    echo
-    echo "Done!  you may need to logout and login again for loading client configuration"
-    echo " Run './${BASEFOLDER}/scripts/service-openmano start' for starting openmano in a screen"
-fi
diff --git a/scripts/install-osm-im.sh b/scripts/install-osm-im.sh
deleted file mode 100755 (executable)
index 2fad214..0000000
+++ /dev/null
@@ -1,111 +0,0 @@
-#!/bin/bash
-
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-##
-
-# author: Alfonso Tierno
-
-# It uses following env, if not provided filling by default
-[ -z "$GIT_OSMIM_URL" ] && GIT_OSMIM_URL=https://osm.etsi.org/gerrit/osm/IM.git
-[ -z "$DEVELOP" ] && DEVELOP=""
-# folder where RO is installed
-[ -z "$BASEFOLDER" ] && HERE=$(dirname $(readlink -f ${BASH_SOURCE[0]})) && BASEFOLDER=$(dirname $HERE)
-[ -z "$SUDO_USER" ] && SUDO_USER="$USER"
-[ -z "$NO_PACKAGES" ] && NO_PACKAGES=""
-[ -z "$_DISTRO" ] && _DISTRO="Ubuntu"
-
-function usage(){
-    echo -e "usage: sudo -E $0 [OPTIONS]"
-    echo -e "Install last stable source code of osm-im and the needed packages"
-    echo -e "  OPTIONS"
-    echo -e "     -h --help:  show this help"
-    echo -e "     -b REFSPEC: install from source code using a specific branch (master, v2.0, ...) or tag"
-    echo -e "                    -b master          (main branch)"
-    echo -e "                    -b v2.0            (v2.0 branch)"
-    echo -e "                    -b tags/v1.1.0     (a specific tag)"
-    echo -e "                    ..."
-    echo -e "     --develop:  install last master version for developers"
-    echo -e "     --no-install-packages: use this option to skip updating and installing the requires packages. This" \
-            "avoid wasting time if you are sure requires packages are present e.g. because of a previous installation"
-}
-while getopts ":b:h-:" o; do
-    case "${o}" in
-        b)
-            export COMMIT_ID=${OPTARG}
-            ;;
-        h)
-            usage && exit 0
-            ;;
-        -)
-            [ "${OPTARG}" == "help" ] && usage && exit 0
-            [ "${OPTARG}" == "develop" ] && export DEVELOP="y" && continue
-            [ "${OPTARG}" == "quiet" ] && export QUIET_MODE=yes && export DEBIAN_FRONTEND=noninteractive && continue
-            [ "${OPTARG}" == "no-install-packages" ] && export NO_PACKAGES=yes && continue
-            echo -e "Invalid option: '--$OPTARG'\nTry $0 --help for more information" >&2
-            exit 1
-            ;;
-        \?)
-            echo -e "Invalid option: '-$OPTARG'\nTry $0 --help for more information" >&2
-            exit 1
-            ;;
-        :)
-            echo -e "Option '-$OPTARG' requires an argument\nTry $0 --help for more information" >&2
-            exit 1
-            ;;
-        *)
-            usage >&2
-            exit 1
-            ;;
-    esac
-done
-
-su $SUDO_USER -c "git -C ${BASEFOLDER} clone ${GIT_OSMIM_URL} IM" ||
-    ! echo "Error cannot clone from '${GIT_OSMIM_URL}'" >&2 || exit 1
-if [[ -n $COMMIT_ID ]] ; then
-    echo -e "Installing osm-IM from refspec: $COMMIT_ID"
-    su $SUDO_USER -c "git -C ${BASEFOLDER}/IM checkout $COMMIT_ID" ||
-        ! echo "Error cannot checkout '$COMMIT_ID' from '${GIT_OSMIM_URL}'" >&2 || exit 1
-elif [[ -z $DEVELOP ]]; then
-    LATEST_STABLE_TAG=`git -C "${BASEFOLDER}/IM" tag -l "v[0-9]*" | sort -V | tail -n1`
-    echo -e "Installing osm-IM from refspec: tags/${LATEST_STABLE_TAG}"
-    su $SUDO_USER -c "git -C ${BASEFOLDER}/IM checkout tags/${LATEST_STABLE_TAG}" ||
-        ! echo "Error cannot checkout 'tags/${LATEST_STABLE_TAG}' from '${GIT_OSMIM_URL}'" >&2 || exit 1
-else
-    echo -e "Installing osm-IM from refspec: master"
-fi
-
-# Install debian dependencies before setup.py
-if [[ -z "$NO_PACKAGES" ]]
-then
-    # apt-get update
-    # apt-get install -y git python-pip
-    # pip2 install pip==9.0.3
-    pip2 install pyangbind || exit 1
-fi
-
-PYBINDPLUGIN=$(python2 -c 'import pyangbind; import os; print "%s/plugin" % os.path.dirname(pyangbind.__file__)')
-su $SUDO_USER -c 'mkdir -p "'${BASEFOLDER}/IM/osm_im'"'
-su $SUDO_USER -c 'touch "'${BASEFOLDER}/IM/osm_im/__init__.py'"'
-# wget -q https://raw.githubusercontent.com/RIFTIO/RIFT.ware/RIFT.ware-4.4.1/modules/core/util/yangtools/yang/rw-pb-ext.yang -O "${BASEFOLDER}/IM/models/yang/rw-pb-ext.yang"
-for target in vnfd nsd ; do
-    pyang -Werror --path "${BASEFOLDER}/IM/models/yang" --plugindir "${PYBINDPLUGIN}" -f pybind \
-        -o "${BASEFOLDER}/IM/osm_im/${target}.py" "${BASEFOLDER}/IM/models/yang/${target}.yang"
-done
-
-pip2 install -e "${BASEFOLDER}/IM" || ! echo "ERROR installing python-osm-im library!!!" >&2  || exit 1
diff --git a/scripts/openmano-report b/scripts/openmano-report
deleted file mode 100755 (executable)
index f2180af..0000000
+++ /dev/null
@@ -1,124 +0,0 @@
-#!/bin/bash
-
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-#It generates a report for debugging
-
-DIRNAME=$(readlink -f ${BASH_SOURCE[0]})
-DIRNAME=$(dirname $DIRNAME )
-OMCLIENT=openmano
-
-#get screen log files at the beginning
-echo
-echo "-------------------------------"
-echo "log files"
-echo "-------------------------------"
-echo "-------------------------------"
-echo "OPENMANO"
-echo "-------------------------------"
-echo "cat /var/log/osm/openmano.log*"
-cat /var/log/osm/openmano.log*
-echo
-echo "-------------------------------"
-echo
-
-#get version
-echo
-echo "-------------------------------"
-echo "version"
-echo "-------------------------------"
-echo "-------------------------------"
-echo "OPENMANO"
-echo "-------------------------------"
-echo "openmanod --version"
-openmanod --version
-echo
-echo "-------------------------------"
-echo
-
-#get configuration files
-echo "-------------------------------"
-echo "Configuration files"
-echo "-------------------------------"
-echo "-------------------------------"
-echo "OPENMANO"
-echo "-------------------------------"
-echo "cat /etc/osm/openmanod.cfg"
-cat /etc/osm/openmanod.cfg
-echo "-------------------------------"
-echo
-
-#get list of items
-for verbose in "" "-vvv"
-do
-  echo "-------------------------------"
-  echo "OPENMANO$verbose"
-  echo "-------------------------------"
-  echo "$OMCLIENT config $verbose"
-  $OMCLIENT config
-  echo "-------------------------------"
-  echo "$OMCLIENT tenant-list $verbose"
-  $OMCLIENT tenant-list $verbose
-  echo "-------------------------------"
-  echo "$OMCLIENT datacenter-list --all"
-  $OMCLIENT datacenter-list --all
-  echo "-------------------------------"
-  echo "$OMCLIENT datacenter-list $verbose"
-  $OMCLIENT datacenter-list $verbose
-  echo "-------------------------------"
-  dclist=`$OMCLIENT datacenter-list |awk '{print $1}'`
-  for dc in $dclist; do
-    echo "$OMCLIENT datacenter-net-list $dc $verbose"
-    $OMCLIENT datacenter-net-list $dc $verbose
-    echo "-------------------------------"
-  done
-  echo "$OMCLIENT vnf-list $verbose"
-  $OMCLIENT vnf-list $verbose
-  echo "-------------------------------"
-  vnflist=`$OMCLIENT vnf-list |awk '$1!="No" {print $1}'`
-  for vnf in $vnflist; do
-    echo "$OMCLIENT vnf-list $vnf $verbose"
-    $OMCLIENT vnf-list $vnf $verbose
-    echo "-------------------------------"
-  done
-  echo "$OMCLIENT scenario-list $verbose"
-  $OMCLIENT scenario-list $verbose
-  echo "-------------------------------"
-  scenariolist=`$OMCLIENT scenario-list |awk '$1!="No" {print $1}'`
-  for sce in $scenariolist; do
-    echo "$OMCLIENT scenario-list $sce $verbose"
-    $OMCLIENT scenario-list $sce $verbose
-    echo "-------------------------------"
-  done
-  echo "$OMCLIENT instance-scenario-list $verbose"
-  $OMCLIENT instance-scenario-list $verbose
-  echo "-------------------------------"
-  instancelist=`$OMCLIENT instance-scenario-list |awk '$1!="No" {print $1}'`
-  for i in $instancelist; do
-    echo "$OMCLIENT instance-scenario-list $i $verbose"
-    $OMCLIENT instance-scenario-list $i $verbose
-    echo "-------------------------------"
-  done
-  echo
-
-done
-echo
diff --git a/scripts/python-osm-ro.postinst b/scripts/python-osm-ro.postinst
deleted file mode 100755 (executable)
index 0f6e5a8..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/bin/bash
-
-##
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: OSM_TECH@list.etsi.org
-##
-
-echo "POST INSTALL OSM-RO"
-OSMRO_PATH=`python -c 'import osm_ro; print osm_ro.__path__[0]'`
-#OSMLIBOVIM_PATH=`python -c 'import lib_osm_openvim; print lib_osm_openvim.__path__[0]'`
-
-#Pip packages required for vmware connector
-pip2 install pip==9.0.3
-pip2 install --upgrade pyvcloud==19.1.1
-pip2 install --upgrade progressbar
-pip2 install --upgrade prettytable
-pip2 install --upgrade pyvmomi
-pip2 install --upgrade pyang pyangbind
-pip2 install untangle
-pip2 install pyone
-pip2 install -e git+https://github.com/python-oca/python-oca#egg=oca
-pip2 install azure
-
-# Packages required for fos connector
-pip2 install fog05rest
-
-
-systemctl enable osm-ro.service
-
-#Creation of log folder
-mkdir -p /var/log/osm
-
-#configure arg-autocomplete for this user
-su $SUDO_USER -c 'activate-global-python-argcomplete --user'
-if ! su  $SUDO_USER -c 'grep -q bash_completion.d/python-argcomplete.sh ${HOME}/.bashrc'
-then
-    echo "    inserting .bash_completion.d/python-argcomplete.sh execution at .bashrc"
-    su $SUDO_USER -c 'echo ". ${HOME}/.bash_completion.d/python-argcomplete.sh" >> ~/.bashrc'
-fi
-
-echo '
-To make OSM RO work, you have to install mysql and a database, and finally start osm-ro service'
-echo "     ${OSMRO_PATH}/database_utils/install-db-server.sh # -h for help"
-echo '     service osm-ro start'
-
-
diff --git a/scripts/service-openmano b/scripts/service-openmano
deleted file mode 100755 (executable)
index a44743d..0000000
+++ /dev/null
@@ -1,212 +0,0 @@
-#!/bin/bash
-
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-
-#launch openmano inside a screen.
-#or call service if it is installed on systemd
-
-
-DIRNAME=$(readlink -f ${BASH_SOURCE[0]})
-DIRNAME=$(dirname $DIRNAME )
-DIR_OM=$(dirname $DIRNAME )
-
-function usage(){
-    echo -e "Usage: $0 [openmano/mano] start|stop|restart|status"
-    echo -e "  Launch|Removes|Restart|Getstatus openmano on a screen/service"
-    echo -e "    -n --screen-name NAME : name of screen to launch openmano (default mano or service)"
-    echo -e "    -h --help: shows this help"
-    echo -e "    -- PARAMS use to separate PARAMS that will be send to the service. e.g. -pPORT -PADMINPORT --dbname=DDBB"
-}
-
-
-function kill_pid(){
-    #send TERM signal and wait 5 seconds and send KILL signal ir still running
-    #PARAMS: $1: PID of process to terminate
-    kill $1 #send TERM signal
-    WAIT=5
-    while [ $WAIT -gt 0 ] && ps -o pid -U $USER -u $USER | grep -q $1
-    do
-        sleep 1
-        WAIT=$((WAIT-1))
-        [ $WAIT -eq 0 ] && echo -n "sending SIGKILL...  " &&  kill -9 $1  #kill when count reach 0
-    done
-    echo "done"
-}
-
-#process options
-source ${DIRNAME}/get-options.sh "screen-name:n= help:h --" $* || exit 1
-
-#help
-[ -n "$option_help" ] && usage && exit 0
-
-
-#obtain parameters
-om_list=""
-#om_action="start"  #uncoment to get a default action
-action_list=""
-om_params="$option__"
-
-for param in $params
-do
-    [ "$param" == "start" -o "$param" == "stop"  -o "$param" == "restart" -o "$param" == "status" ] && om_action=$param  && continue
-    [ "$param" == "openmano" -o "$param" == "mano" ]   && om_list="$om_list mano"             && continue
-    #short options
-    echo "invalid argument '$param'?  Type -h for help" >&2 && exit 1
-done
-
-[[ -n $option_screen_name ]] && option_screen_name=${option_screen_name#*.} #allow the format 'pid.name' and keep only name
-#check action is provided
-[ -z "$om_action" ] && usage >&2 && exit -1
-
-#if no componenets supplied assume all
-[ -z "$om_list" ] && om_list="mano"
-function find_process_id(){ #PARAMS:  command screen-name
-    for process_id in `ps -o pid,cmd -U $USER -u $USER | grep -v grep | grep "${1}" | awk '{print $1}'`
-    do
-        scname=$(ps wwep $process_id | grep -o 'STY=\S*')
-        scname=${scname#STY=}
-        [[ -n "$2" ]] && [[ "${scname#*.}" != "$2" ]] && continue
-        echo -n "${process_id} "
-    done
-    echo
-}
-
-for om_component in $om_list
-do
-    screen_name="${om_component}"
-    [[ -n "$option_screen_name" ]] && screen_name=$option_screen_name
-    [ "${om_component}" == "mano" ] && om_cmd="./openmanod"   && om_name="openmano  " && om_dir=$(readlink -f ${DIR_OM})
-    #obtain PID of program
-    component_id=`find_process_id "${om_cmd}" $option_screen_name`
-    processes=$(echo $component_id | wc -w)
-
-    #status
-    if [ "$om_action" == "status" ]
-    then
-        running=""
-        for process_id in $component_id
-        do
-            scname=$(ps wwep $process_id | grep -o 'STY=\S*')
-            scname=${scname#STY=}
-            [[ -n "$option_screen_name" ]] && [[ "${scname#*.}" != "$option_screen_name" ]] && continue
-            printf "%-15s" "pid: ${process_id},"
-            [[ -n "$scname" ]] && printf "%-25s" "screen: ${scname},"
-            echo cmd: $(ps -o cmd p $process_id | tail -n1 )
-            running=y
-        done
-        #if installed as a service and it is not provided a screen name call service
-        [[ -f /etc/systemd/system/osm-ro.service ]] && [[ -z $option_screen_name ]] && running=y #&& service osm-ro status
-        if [ -z "$running" ]
-        then
-            echo -n "    $om_name not running" && [[ -n "$option_screen_name" ]] && echo " on screen '$option_screen_name'" || echo
-        fi
-    fi
-
-    #if installed as a service and it is not provided a screen name call service
-    [[ -f /etc/systemd/system/osm-ro.service ]] && [[ -z $option_screen_name ]] && service osm-ro $om_action && ( [[ $om_action == status ]] || sleep 5 ) && exit $?
-
-
-    #stop
-    if [ "$om_action" == "stop" -o "$om_action" == "restart" ]
-    then
-        #terminates program
-        [ $processes -gt 1 ] && echo "$processes processes are running, specify with --screen-name" && continue
-        [ $processes -eq 1 ] && echo -n "    stopping $om_name ... " && kill_pid $component_id
-        component_id=""
-        #terminates screen
-        if screen -wipe | grep -q -e  "\.${screen_name}\b" 
-        then
-            screen -S $screen_name -p 0 -X stuff "exit\n" || echo
-            sleep 1
-        fi
-    fi
-
-    #start
-    if [ "$om_action" == "start" -o "$om_action" == "restart" ]
-    then
-        #calculates log file name
-        logfile=""
-        mkdir -p $DIR_OM/logs && logfile=$DIR_OM/logs/open${screen_name}.log || echo "can not create logs directory  $DIR_OM/logs"
-        #check already running
-        [ -n "$component_id" ] && echo "    $om_name is already running. Skipping" && continue
-        #create screen if not created
-        echo -n "    starting $om_name ... "
-        if ! screen -wipe | grep -q -e "\.${screen_name}\b"
-        then
-            pushd ${om_dir} > /dev/null
-            screen -dmS ${screen_name}  bash
-            sleep 1
-            popd > /dev/null
-        else
-            echo -n " using existing screen '${screen_name}' ... "
-            screen -S ${screen_name} -p 0 -X log off
-            screen -S ${screen_name} -p 0 -X stuff "cd ${om_dir}\n"
-            sleep 1
-        fi
-        #move old log file index one number up and log again in index 0
-        if [[ -n $logfile ]]
-        then
-            for index in 8 7 6 5 4 3 2 1
-            do
-                [[ -f ${logfile}.${index} ]] && mv ${logfile}.${index} ${logfile}.$((index+1))
-            done
-            [[ -f ${logfile} ]] && mv ${logfile} ${logfile}.1
-            screen -S ${screen_name} -p 0 -X logfile ${logfile}
-            screen -S ${screen_name} -p 0 -X log on
-        fi
-        #launch command to screen
-        screen -S ${screen_name} -p 0 -X stuff "${om_cmd}${om_params}\n"
-        #check if is running
-        [[ -n $logfile ]] && timeout=120 #2 minute
-        [[ -z $logfile ]] && timeout=20
-        while [[ $timeout -gt 0 ]]
-        do
-           #check if is running
-           #echo timeout $timeout
-           #if !  ps -f -U $USER -u $USER | grep -v grep | grep -q ${om_cmd}
-           log_lines=0
-           [[ -n $logfile ]] && log_lines=`head ${logfile} | wc -l`
-           component_id=`find_process_id "${om_cmd}${om_params}" $screen_name`
-           if [[ -z $component_id ]]
-           then #process not started or finished
-               [[ $log_lines -ge 2 ]] &&  echo -n "ERROR, it has exited." && break
-               #started because writted serveral lines at log so report error
-           fi
-           [[ -n $logfile ]] && grep -q "open${om_component}d ready" ${logfile} && break
-           sleep 1
-           timeout=$((timeout -1))
-        done
-        if [[ -n $logfile ]] && [[ $timeout == 0 ]] 
-        then 
-           echo -n "timeout!"
-        else
-           echo -n "running on 'screen -x ${screen_name}'."
-        fi
-        [[ -n $logfile ]] && echo "  Logging at '${logfile}'" || echo
-    fi
-done
-
-
-
-
diff --git a/setup.py b/setup.py
deleted file mode 100755 (executable)
index 5d78ec3..0000000
--- a/setup.py
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/usr/bin/env python
-
-#from distutils.core import setup
-#from distutils.command.install_data import install_data
-from setuptools import setup
-from os import system
-#import glob
-
-_name = 'osm_ro'
-_description = 'OSM Resource Orchestrator'
-_author = 'ETSI OSM'
-_author_email = 'alfonso.tiernosepulveda@telefonica.com'
-_maintainer = 'garciadeblas'
-_maintainer_email = 'gerardo.garciadeblas@telefonica.com'
-_license = 'Apache 2.0'
-_url = 'https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary'
-_requirements = [
-    "six",  # python 2 x 3 compatibility
-    "PyYAML",
-    "bottle",
-    #"mysqlclient",
-    #"MySQLdb",
-    "jsonschema",
-    "paramiko",
-    "argcomplete",
-    "requests",
-    "logutils",
-    "python-openstackclient",
-    "python-novaclient",
-    "python-keystoneclient",
-    "python-glanceclient",
-    "python-neutronclient",
-    "python-cinderclient",
-    "networking-l2gw",
-    #"pyvcloud",
-    #"progressbar",
-    "prettytable",
-    #"pyvmomi",
-    "boto",
-    #"lib_osm_openvim",
-    #"osm_im",
-    "pycrypto",
-    "netaddr",
-]
-
-setup(name=_name,
-      version_command=('git describe --match v*', 'pep440-git-full'),
-      description = _description,
-      long_description = open('README.rst').read(),
-      author = _author,
-      author_email = _author_email,
-      maintainer = _maintainer,
-      maintainer_email = _maintainer_email,
-      url = _url,
-      license = _license,
-      packages = [_name],
-      #packages = ['osm_ro', 'osm_roclient'],
-      package_dir = {_name: _name},
-      package_data = {_name: ['vnfs/*.yaml', 'vnfs/examples/*.yaml',
-                         'scenarios/*.yaml', 'scenarios/examples/*.yaml',
-                         'instance-scenarios/examples/*.yaml', 'database_utils/*',
-                         'scripts/*']},
-      data_files = [('/etc/osm/', ['osm_ro/openmanod.cfg']),
-                   ('/etc/systemd/system/', ['osm_ro/osm-ro.service']),
-                   ],
-      scripts=['openmanod', 'openmano', 'osm_ro/scripts/service-openmano', 'osm_ro/scripts/openmano-report',
-          'osm_ro/scripts/RO-start.sh'],
-      install_requires=_requirements,
-      include_package_data=True,
-      setup_requires=['setuptools-version-command'],
-      #test_suite='nose.collector',
-      )
-
diff --git a/stdeb.cfg b/stdeb.cfg
deleted file mode 100644 (file)
index 3751334..0000000
--- a/stdeb.cfg
+++ /dev/null
@@ -1,6 +0,0 @@
-[DEFAULT]
-Suite: xenial
-XS-Python-Version: >= 2.7
-Maintainer: Gerardo Garcia <gerardo.garciadeblas@telefonica.com>
-Depends: python-pip, libmysqlclient-dev, libssl-dev, libffi-dev, python-argcomplete, python-boto, python-bottle, python-jsonschema, python-logutils, python-cinderclient, python-glanceclient, python-keystoneclient, python-neutronclient, python-networking-l2gw, python-novaclient, python-openstackclient, python-mysqldb, python-lib-osm-openvim, python-osm-im, python-networkx, genisoimage
-
diff --git a/test-docker/Dockerfile-devops b/test-docker/Dockerfile-devops
new file mode 100644 (file)
index 0000000..25eb013
--- /dev/null
@@ -0,0 +1,65 @@
+##
+# Copyright 2019 ETSI
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+
+########################################################################
+
+from ubuntu:18.04
+
+MAINTAINER  Alfonso Tierno <alfonso.tiernosepulveda@telefoncia.com>
+
+RUN apt-get update && apt-get -y install curl software-properties-common git tox python3-pip \
+    && python3 -m pip install --upgrade pip && python3 -m pip install pyangbind
+
+ARG REPOSITORY_BASE=http://osm-download.etsi.org/repository/osm/debian
+ARG RELEASE=ReleaseSIX-daily
+ARG REPOSITORY_KEY=OSM%20ETSI%20Release%20Key.gpg
+ARG REPOSITORY=testing
+
+RUN curl ${REPOSITORY_BASE}/${RELEASE}/${REPOSITORY_KEY} | apt-key add -
+RUN add-apt-repository -y "deb ${REPOSITORY_BASE}/${RELEASE} ${REPOSITORY} IM common openvim" && apt-get update
+
+ARG RO_VERSION
+ARG IM_VERSION
+
+COPY temp /app
+RUN DEBIAN_FRONTEND=noninteractive apt-get install -y python3-osm-im${IM_VERSION} \
+    && DEBIAN_FRONTEND=noninteractive dpkg -i --force-depends /app/*.deb \
+    && DEBIAN_FRONTEND=noninteractive apt-get  -y -f install
+
+EXPOSE 9090
+
+# Two mysql databases are needed (DB and DB_OVIM). Can be hosted on same or separated containers
+# These ENV must be provided
+ENV RO_DB_HOST=""
+ENV RO_DB_OVIM_HOST=""
+    # if empty RO_DB_HOST is assumed
+
+# These ENV should be provided first time for creating database. It will create and init only if empty!
+ENV RO_DB_ROOT_PASSWORD=""
+ENV RO_DB_OVIM_ROOT_PASSWORD=""
+    # if empty RO_DB_ROOT_PASSWORD is assumed
+
+# These ENV can be provided, but default values are ok
+ENV RO_DB_USER=mano
+ENV RO_DB_OVIM_USER=mano
+ENV RO_DB_PASSWORD=manopw
+ENV RO_DB_OVIM_PASSWORD=manopw
+ENV RO_DB_PORT=3306
+ENV RO_DB_OVIM_PORT=3306
+ENV RO_DB_NAME=mano_db
+ENV RO_DB_OVIM_NAME=mano_vim_db
+
+CMD RO-start.sh
diff --git a/test-docker/test-gen-devops.sh b/test-docker/test-gen-devops.sh
new file mode 100755 (executable)
index 0000000..3f79787
--- /dev/null
@@ -0,0 +1,65 @@
+#!/bin/bash
+
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+# Generates the debian packages; and then generates a docker image based on Dockerfile-devops and updates a
+# running docker stack with the generated image
+
+HERE=$(dirname $(readlink -f ${BASH_SOURCE[0]}))
+export RO_BASE=$(dirname $HERE)
+
+# clean
+docker rm -f ro_pkg 2>/dev/null && echo docker ro_pkg removed
+rm -rf $HERE/temp/*
+mkdir -p $HERE/temp
+
+echo -e "\n\n[STAGE 1] Building dockerfile used for the package generation"
+docker build $RO_BASE -f $RO_BASE/Dockerfile  -t opensourcemano/ro_pkg
+sleep 2
+
+echo "[STAGE 1.1] Generating packages inside docker ro_pkg"
+docker run -d --name ro_pkg opensourcemano/ro_pkg bash -c 'sleep 3600'
+docker cp $RO_BASE ro_pkg:/RO
+docker exec ro_pkg bash -c 'cd /RO;  ./devops-stages/stage-build.sh'
+deb_files=`docker exec ro_pkg bash -c 'ls /RO/deb_dist/'`
+[ -z "$deb_files" ] && echo "No packages generated" >&2 && exit 1
+echo $deb_files
+
+echo -e "\n\n[STAGE 1.2] Print package information and copy to '$HERE/temp/'"
+# print package information and copy to "$HERE/temp/"
+for deb_file in $deb_files ; do
+   echo; echo; echo
+   echo $deb_file info:
+   echo "===========================" 
+   docker cp ro_pkg:/RO/deb_dist/$deb_file $HERE/temp/
+   dpkg -I $HERE/temp/$(basename $deb_file)
+done
+
+# docker rm -f ro_pkg
+echo -e "\n\n[STAGE 2] Building docker image opensourcemano/ro:py3_devops based on debian packages"
+docker build $HERE -f $HERE/Dockerfile-devops  -t opensourcemano/ro:py3_devops ||
+    ! echo "error generating devops dockerfile" >&2 || exit 1
+sleep 2
+# docker run -d --name ro_devops opensourcemano/ro:py3_devops
+# docker run -ti exec ro_devops ro tenant-list  || ! echo "Cannot exec ro client to get server tenants" >&2 || exit 1
+
+echo -e "\n\n[STAGE 3] Update service osm_ro with generated docker image"
+docker service update osm_ro --force --image opensourcemano/ro:py3_devops
+sleep 2
+docker container prune -f
+docker service logs osm_ro
diff --git a/test-docker/test-gen-local.sh b/test-docker/test-gen-local.sh
new file mode 100755 (executable)
index 0000000..0b9c73e
--- /dev/null
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+# Generates a docker image based on Dockerfile-local and updates a running docker stack with the generated image
+
+HERE=$(dirname $(readlink -f ${BASH_SOURCE[0]}))
+export RO_BASE=$(dirname $HERE)
+
+echo -e "\n\n[STAGE 1] Building docker image opensourcemano/ro:py3_local based on debian packages"
+docker build $RO_BASE -f $RO_BASE/Dockerfile-local -t opensourcemano/ro:py3_local ||
+    ! echo "error generating local dockerfile" >&2 || exit 1
+sleep 2
+docker service update osm_ro --force --image opensourcemano/ro:py3_local
+sleep 2
+docker container prune -f
+docker service logs osm_ro
diff --git a/test/RO_tests/afiinity_vnf/scenario_simple_2_vnf_afinnity.yaml b/test/RO_tests/afiinity_vnf/scenario_simple_2_vnf_afinnity.yaml
deleted file mode 100644 (file)
index 22e372f..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
----
-schema_version:  2
-scenario:
-  name:          simple_ha
-  description:   Simple network scenario consisting of two VNF connected to an external network
-  vnfs:
-    linux1:                   # vnf/net name in the scenario
-      vnf_name:  linux_test_2vms # VNF name as introduced in OPENMANO DB
-  networks:
-    mgmt:                   # provide a name for this net or connection
-      external:  true
-      interfaces:
-      - linux1:  control0       # Node and its interface
-      - linux1:  control1       # Node and its interface
-
-
-
-
diff --git a/test/RO_tests/afiinity_vnf/vnfd_linux_2_vnfc_affinity.yaml b/test/RO_tests/afiinity_vnf/vnfd_linux_2_vnfc_affinity.yaml
deleted file mode 100644 (file)
index 53c7770..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
----
-
-vnf:
-    name:        linux_test_2vms
-    description: Single-VM VNF with a traditional cloud VM based on generic Linux OS
-    external-connections:
-    -   name:              control0
-        type:              mgmt              # "mgmt" (autoconnect to management net), "bridge", "data"
-        VNFC:              linux-VM-HA-A  # Virtual Machine this interface belongs to
-        local_iface_name:  eth0             # interface name inside this Virtual Machine (must be defined in the VNFC section)
-        description:       Management interface 0
-    -   name:              control1
-        type:              mgmt              # "mgmt" (autoconnect to management net), "bridge", "data"
-        VNFC:              linux-VM-HA-B  # Virtual Machine this interface belongs to
-        local_iface_name:  eth0             # interface name inside this Virtual Machine (must be defined in the VNFC section)
-        description:       Management interface 1
-    VNFC:
-    -   name:        linux-VM-HA-A
-        description: Generic Linux Virtual Machine
-        availability_zone: A  # availanility zone A
-        #Copy the image to a compute path and edit this path
-        image name:  TestVM
-        vcpus: 1          # Only for traditional cloud VMs. Number of virtual CPUs (oversubscription is allowed).
-        ram: 1024         # Only for traditional cloud VMs. Memory in MBytes (not from hugepages, oversubscription is allowed)
-        disk: 10
-        bridge-ifaces:
-        -   name:      eth0
-            vpci:      "0000:00:11.0"
-        numas: []
-    -   name:        linux-VM-HA-B
-        description: Generic Linux Virtual Machine
-        availability_zone: B # availanility zone B
-        #Copy the image to a compute path and edit this path
-        image name:  TestVM
-        vcpus: 1          # Only for traditional cloud VMs. Number of virtual CPUs (oversubscription is allowed).
-        ram: 1024         # Only for traditional cloud VMs. Memory in MBytes (not from hugepages, oversubscription is allowed)
-        disk: 10
-        bridge-ifaces:
-        -   name:      eth0
-            vpci:      "0000:00:12.0"
-        numas: []
diff --git a/test/RO_tests/empy_volume/scenario_additional_disk_empty_volume.yaml b/test/RO_tests/empy_volume/scenario_additional_disk_empty_volume.yaml
deleted file mode 100644 (file)
index c0b541c..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
----
-schema_version:  2
-scenario:
-  name:          vnf_additional_disk_empty_volume
-  description:   Just deploy vnf_2_disks
-  public:        false      # if available for other tenants
-  vnfs:
-    vnf_2_disks:                     # vnf name in the scenario
-      #identify an already openmano uploaded VNF either by vnf_id (uuid, prefered) or vnf_name
-      #vnf_id:    0c0dcc20-c5d5-11e6-a9fb-fa163e2ae06e                  #prefered id method
-      vnf_name:  vnf_additional_disk_empty_volume   #can fail if several vnfs matches this name
-      #graph:     {"y":399,"x":332,"ifaces":{"left":[["xe0","d"],["xe1","d"]],"bottom":[["eth0","v"],["eth1","m"]]}}
-  networks:                
-    mgmt:
-      # Connections based on external networks (datacenter nets) must include the external network in the list of nodes
-      type:      bridge
-      external:  true       #this will be connected outside
-      interfaces:
-      -   vnf_2_disks:  mgmt0
-
diff --git a/test/RO_tests/empy_volume/vnfd_additional_disk_empty_volume.yaml b/test/RO_tests/empy_volume/vnfd_additional_disk_empty_volume.yaml
deleted file mode 100644 (file)
index 7a6f5c2..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
----
-vnf:
-    name: vnf_additional_disk_empty_volume
-    description: VNF with additional volume based on image
-    # class: parent      # Optional. Used to organize VNFs
-    external-connections:
-    -   name:              mgmt0
-        type:              mgmt        # "mgmt" (autoconnect to management net), "bridge", "data"
-        VNFC:              TEMPLATE-VM # Virtual Machine this interface belongs to
-        local_iface_name:  mgmt0       # interface name inside this Virtual Machine (must be defined in the VNFC section)
-        description:       Management interface
-    VNFC:                              # Virtual machine array 
-    -   name:        TEMPLATE-VM       # name of Virtual Machine
-        description: TEMPLATE description
-        image name: ubuntu16.04
-        # image metadata: {"bus":"ide", "os_type":"windows", "use_incremental": "no" } #Optional
-        # processor:                     #Optional
-        #     model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz
-        #     features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"]
-        # hypervisor:                    #Optional
-        #     type: QEMU-kvm
-        #     version: "10002|12001|2.6.32-358.el6.x86_64"
-        vcpus: 1          # Only for traditional cloud VMs. Number of virtual CPUs (oversubscription is allowed).
-        ram: 1000         # Only for traditional cloud VMs. Memory in MBytes (not from hugepages, oversubscription is allowed)
-        disk: 5          # disk size in GiB, by default 1
-        #numas: 
-        #-   paired-threads: 5          # "cores", "paired-threads", "threads"
-        #    paired-threads-id: [ [0,1], [2,3], [4,5], [6,7], [8,9] ] # By default follows incremental order
-        #    memory: 14                 # GBytes
-        #    interfaces: []
-        bridge-ifaces:
-        -   name:      mgmt0
-            vpci:      "0000:00:0a.0"    # Optional. Virtual PCI address
-            bandwidth: 1 Mbps            # Optional. Informative only
-            # mac_address: '20:33:45:56:77:46' #avoid this option if possible
-            # model:       'virtio'      # ("virtio","e1000","ne2k_pci","pcnet","rtl8139") By default, it is automatically filled by libvirt
-        devices:                       # Optional, order determines device letter asignation (hda, hdb, ...)
-        -   type:      disk            # "disk","cdrom","xml"
-            size: 1
-            # image metadata: {"bus":"ide", "os_type":"windows", "use_incremental": "no" }
-            # vpci:      "0000:00:03.0"   # Optional, not for disk or cdrom
-    # Additional Virtual Machines would be included here
-
diff --git a/test/RO_tests/floating_ip/scenario_floating_ip.yaml b/test/RO_tests/floating_ip/scenario_floating_ip.yaml
deleted file mode 100644 (file)
index dcfb239..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
----
-schema_version:  2
-scenario:
-  name:          vnf_floating_ip
-  description:   vnf_floating_ip
-  public:        false      # if available for other tenants
-  vnfs:
-    vnf_floating_ip:                     # vnf name in the scenario
-      #identify an already openmano uploaded VNF either by vnf_id (uuid, prefered) or vnf_name
-      #vnf_id:    0c0dcc20-c5d5-11e6-a9fb-fa163e2ae06e                  #prefered id method
-      vnf_name:  vnf_floating_ip   #can fail if several vnfs matches this name
-      #graph:     {"y":399,"x":332,"ifaces":{"left":[["xe0","d"],["xe1","d"]],"bottom":[["eth0","v"],["eth1","m"]]}}
-  networks:                
-    mgmt:
-      # Connections based on external networks (datacenter nets) must include the external network in the list of nodes
-      type:      bridge
-      external:  true       #this will be connected outside
-      interfaces:
-      -   vnf_floating_ip:  mgmt0
-
diff --git a/test/RO_tests/floating_ip/vnfd_floating_ip.yaml b/test/RO_tests/floating_ip/vnfd_floating_ip.yaml
deleted file mode 100644 (file)
index 0d305a9..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
----
-vnf:
-    name: vnf_floating_ip
-    description: VNF disabling port_security option in mgmt interface 
-    # class: parent      # Optional. Used to organize VNFs
-    external-connections:
-    -   name:              mgmt0
-        type:              mgmt        # "mgmt" (autoconnect to management net), "bridge", "data"
-        VNFC:              vnf_floating_ip # Virtual Machine this interface belongs to
-        local_iface_name:  mgmt0       # interface name inside this Virtual Machine (must be defined in the VNFC section)
-        description:       Management interface
-    VNFC:                              # Virtual machine array 
-    -   name:        vnf_floating_ip       # name of Virtual Machine
-        description: vnf_floating_ip
-        image name: ubuntu16.04
-        # image metadata: {"bus":"ide", "os_type":"windows", "use_incremental": "no" } #Optional
-        # processor:                     #Optional
-        #     model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz
-        #     features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"]
-        # hypervisor:                    #Optional
-        #     type: QEMU-kvm
-        #     version: "10002|12001|2.6.32-358.el6.x86_64"
-        vcpus: 1          # Only for traditional cloud VMs. Number of virtual CPUs (oversubscription is allowed).
-        ram: 1000         # Only for traditional cloud VMs. Memory in MBytes (not from hugepages, oversubscription is allowed)
-        disk: 5          # disk size in GiB, by default 1
-        #numas: 
-        #-   paired-threads: 5          # "cores", "paired-threads", "threads"
-        #    paired-threads-id: [ [0,1], [2,3], [4,5], [6,7], [8,9] ] # By default follows incremental order
-        #    memory: 14                 # GBytes
-        #    interfaces: []
-        bridge-ifaces:
-        -   name:      mgmt0
-            vpci:      "0000:00:0a.0"    # Optional. Virtual PCI address
-            bandwidth: 1 Mbps            # Optional. Informative only
-            floating-ip: True
-            # mac_address: '20:33:45:56:77:46' #avoid this option if possible
-            # model:       'virtio'      # ("virtio","e1000","ne2k_pci","pcnet","rtl8139") By default, it is automatically filled by libvirt
-    # Additional Virtual Machines would be included here
-
diff --git a/test/RO_tests/image_based_volume/scenario_additional_disk_based_image.yaml b/test/RO_tests/image_based_volume/scenario_additional_disk_based_image.yaml
deleted file mode 100644 (file)
index 34ffeb2..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-##
----
-nsd:nsd-catalog:
-    nsd:
-    -   id:          test_2vdu_nsd
-        name:        additional_disk_based_image
-        short-name:  2disks
-        description: Just deploy vnf_2_disks
-        vendor:      OSM
-        version:     '1.0'
-        constituent-vnfd:
-        -   member-vnf-index: vnf2disks
-            vnfd-id-ref: additional_disk_based_image
-        vld:
-        # Networks for the VNFs
-        -   id:         vld1
-            name:       mgmt
-            short-name: vld1-sname
-            type:       ELAN
-            mgmt-network: 'true'
-            vnfd-connection-point-ref:
-            -   member-vnf-index-ref: vnf2disks
-                vnfd-id-ref: additional_disk_based_image
-                vnfd-connection-point-ref: mgmt0
diff --git a/test/RO_tests/image_based_volume/vnfd_additional_disk_based_image.yaml b/test/RO_tests/image_based_volume/vnfd_additional_disk_based_image.yaml
deleted file mode 100644 (file)
index 30aafac..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-##
----
-vnfd-catalog:
-    vnfd:
-    -   connection-point:
-        -   name: mgmt0
-            type: VPORT
-        name: vnf_additional_disk_based_image
-        description: VNF with additional volume based on image
-        id: additional_disk_based_image
-        # short-name: 2disks
-        vendor: ROtest
-        version: '1.0'
-        mgmt-interface:
-            cp: mgmt0
-        vdu:
-        -   id: VM1
-            name: VM1-name
-            image: US1604
-            alternative-images:
-            -   vim-type: openstack
-                image: cirros
-            -   vim-type: openvim
-                image: cirros034
-            volumes:
-            -   name: vdb
-                device-type: disk
-                image: cirros034
-                # image-checksum: 4a293322f18827af81a9450e3792947c
-                size: 8
-            interface:
-            -   name: iface11
-                type: EXTERNAL
-                virtual-interface:
-                    type: VIRTIO
-                external-connection-point-ref: mgmt0
-                mac-address:   "52:33:44:55:66:77"
-            vm-flavor:
-                memory-mb: '2048'
-                storage-gb: '8'
-                vcpu-count: '1'
diff --git a/test/RO_tests/no_port_security/scenario_vnf_no_port_security.yaml b/test/RO_tests/no_port_security/scenario_vnf_no_port_security.yaml
deleted file mode 100644 (file)
index 1f2e41b..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
----
-schema_version:  2
-scenario:
-  name:          vnf_no_port_security
-  description:   vnf_no_port_security
-  public:        false      # if available for other tenants
-  vnfs:
-    vnf_no_port_security:                     # vnf name in the scenario
-      #identify an already openmano uploaded VNF either by vnf_id (uuid, prefered) or vnf_name
-      #vnf_id:    0c0dcc20-c5d5-11e6-a9fb-fa163e2ae06e                  #prefered id method
-      vnf_name:  vnf_no_port_security   #can fail if several vnfs matches this name
-      #graph:     {"y":399,"x":332,"ifaces":{"left":[["xe0","d"],["xe1","d"]],"bottom":[["eth0","v"],["eth1","m"]]}}
-  networks:                
-    mgmt:
-      # Connections based on external networks (datacenter nets) must include the external network in the list of nodes
-      type:      bridge
-      external:  true       #this will be connected outside
-      interfaces:
-      -   vnf_no_port_security:  mgmt0
-
diff --git a/test/RO_tests/no_port_security/vnfd_no_port_security.yaml b/test/RO_tests/no_port_security/vnfd_no_port_security.yaml
deleted file mode 100644 (file)
index 11874ed..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
----
-vnf:
-    name: vnf_no_port_security
-    description: VNF disabling port_security option in mgmt interface 
-    # class: parent      # Optional. Used to organize VNFs
-    external-connections:
-    -   name:              mgmt0
-        type:              mgmt        # "mgmt" (autoconnect to management net), "bridge", "data"
-        VNFC:              vnf_no_port_security # Virtual Machine this interface belongs to
-        local_iface_name:  mgmt0       # interface name inside this Virtual Machine (must be defined in the VNFC section)
-        description:       Management interface
-    VNFC:                              # Virtual machine array 
-    -   name:        vnf_no_port_security       # name of Virtual Machine
-        description: vnf_no_port_security
-        image name: ubuntu16.04
-        # image metadata: {"bus":"ide", "os_type":"windows", "use_incremental": "no" } #Optional
-        # processor:                     #Optional
-        #     model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz
-        #     features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"]
-        # hypervisor:                    #Optional
-        #     type: QEMU-kvm
-        #     version: "10002|12001|2.6.32-358.el6.x86_64"
-        vcpus: 1          # Only for traditional cloud VMs. Number of virtual CPUs (oversubscription is allowed).
-        ram: 1000         # Only for traditional cloud VMs. Memory in MBytes (not from hugepages, oversubscription is allowed)
-        disk: 5          # disk size in GiB, by default 1
-        #numas: 
-        #-   paired-threads: 5          # "cores", "paired-threads", "threads"
-        #    paired-threads-id: [ [0,1], [2,3], [4,5], [6,7], [8,9] ] # By default follows incremental order
-        #    memory: 14                 # GBytes
-        #    interfaces: []
-        bridge-ifaces:
-        -   name:      mgmt0
-            vpci:      "0000:00:0a.0"    # Optional. Virtual PCI address
-            bandwidth: 1 Mbps            # Optional. Informative only
-            port-security: False
-            # mac_address: '20:33:45:56:77:46' #avoid this option if possible
-            # model:       'virtio'      # ("virtio","e1000","ne2k_pci","pcnet","rtl8139") By default, it is automatically filled by libvirt
-    # Additional Virtual Machines would be included here
-
diff --git a/test/RO_tests/passthrough/scenario_p2p_passthrough.yaml b/test/RO_tests/passthrough/scenario_p2p_passthrough.yaml
deleted file mode 100644 (file)
index 4dfd3c5..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
----
-schema_version:  2
-scenario:
-  name:          p2p_passthrough
-  description:   Network scenario consisting of two machines with a sr-iov interconnected between them
-  vnfs: 
-    passthrough1:                   # vnf/net name in the scenario
-      vnf_name:  passthrough        # VNF name as introduced in OPENMANO DB
-    passthrough2:                   # vnf/net name in the scenario
-      vnf_name:  passthrough        # VNF name as introduced in OPENMANO DB
-  networks: 
-    mgmt:                   # provide a name for this net or connection
-      external:  true
-      interfaces: 
-      - passthrough1:  eth0       # Node and its interface
-      - passthrough2:  eth0       # Node and its interface
-    dataplane:                   # provide a name for this net or connection
-      interfaces: 
-      - passthrough1:  xe0       # Node and its interface
-      - passthrough2:  xe0       # Node and its interface
-
diff --git a/test/RO_tests/passthrough/vnfd_1passthrough.yaml b/test/RO_tests/passthrough/vnfd_1passthrough.yaml
deleted file mode 100644 (file)
index ab24adf..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
----
-vnf:
-    name:        passthrough
-    description: Machine with EPA and a SR-IOV interface
-    external-connections:
-    -   name:              eth0
-        type:              bridge
-        VNFC:              passthrough-VM
-        local_iface_name:  eth0
-        description:       management interface
-    -   name:              xe0
-        type:              data
-        VNFC:              passthrough-VM
-        local_iface_name:  xe0
-        description:       Dataplane interface
-    VNFC:
-    -   name:        passthrough-VM
-        description: Machine with EPA and a SR-IOV interface
-        image name:  centos
-        disk: 20
-        numas: 
-        -   threads: 1          # "cores", "paired-threads", "threads"
-            memory: 1                 # GBytes
-            interfaces:
-            -   name:      xe0
-                vpci:      "0000:00:11.0"
-                dedicated: "yes"         # "yes"(passthrough), "no"(sriov with vlan tags), "yes:sriov"(sriovi, but exclusive and without vlan tag)
-                bandwidth: 1 Gbps
-
-        bridge-ifaces:
-        -   name:      eth0
-            vpci:      "0000:00:0a.0"
-
diff --git a/test/RO_tests/pmp_passthrough/scenario_pmp_passthrough.yaml b/test/RO_tests/pmp_passthrough/scenario_pmp_passthrough.yaml
deleted file mode 100644 (file)
index d243c0e..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
----
-schema_version:  2
-scenario:
-  name:          p2p_passthrough
-  description:   Network scenario consisting of 4 machines with a passthrough interconnected between them
-  vnfs: 
-    passthrough1:                   # vnf/net name in the scenario
-      vnf_name:  passthrough        # VNF name as introduced in OPENMANO DB
-    passthrough2:                   # vnf/net name in the scenario
-      vnf_name:  passthrough        # VNF name as introduced in OPENMANO DB
-    passthrough3:                   # vnf/net name in the scenario
-      vnf_name:  passthrough        # VNF name as introduced in OPENMANO DB
-    passthrough4:                   # vnf/net name in the scenario
-      vnf_name:  passthrough        # VNF name as introduced in OPENMANO DB
-
-  networks: 
-    mgmt:                   # provide a name for this net or connection
-      external:  true
-      interfaces: 
-      - passthrough1:  eth0       # Node and its interface
-      - passthrough2:  eth0       # Node and its interface
-      - passthrough3:  eth0       # Node and its interface
-      - passthrough4:  eth0       # Node and its interface
-    dataplane:                   # provide a name for this net or connection
-      interfaces: 
-      - passthrough1:  xe0       # Node and its interface
-      - passthrough2:  xe0       # Node and its interface
-      - passthrough3:  xe0       # Node and its interface
-      - passthrough4:  xe0       # Node and its interface
-
diff --git a/test/RO_tests/pmp_passthrough/vnfd_1passthrough.yaml b/test/RO_tests/pmp_passthrough/vnfd_1passthrough.yaml
deleted file mode 100644 (file)
index ab24adf..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
----
-vnf:
-    name:        passthrough
-    description: Machine with EPA and a SR-IOV interface
-    external-connections:
-    -   name:              eth0
-        type:              bridge
-        VNFC:              passthrough-VM
-        local_iface_name:  eth0
-        description:       management interface
-    -   name:              xe0
-        type:              data
-        VNFC:              passthrough-VM
-        local_iface_name:  xe0
-        description:       Dataplane interface
-    VNFC:
-    -   name:        passthrough-VM
-        description: Machine with EPA and a SR-IOV interface
-        image name:  centos
-        disk: 20
-        numas: 
-        -   threads: 1          # "cores", "paired-threads", "threads"
-            memory: 1                 # GBytes
-            interfaces:
-            -   name:      xe0
-                vpci:      "0000:00:11.0"
-                dedicated: "yes"         # "yes"(passthrough), "no"(sriov with vlan tags), "yes:sriov"(sriovi, but exclusive and without vlan tag)
-                bandwidth: 1 Gbps
-
-        bridge-ifaces:
-        -   name:      eth0
-            vpci:      "0000:00:0a.0"
-
diff --git a/test/RO_tests/pmp_sriov/scenario_pmp_sriov.yaml b/test/RO_tests/pmp_sriov/scenario_pmp_sriov.yaml
deleted file mode 100644 (file)
index 9e24552..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
----
-schema_version:  2
-scenario:
-  name:          p2p_sriov
-  description:   Network scenario consisting of four machines with a sr-iov interconnected between them
-  vnfs: 
-    sriov1:                   # vnf/net name in the scenario
-      vnf_name:  sriov        # VNF name as introduced in OPENMANO DB
-    sriov2:                   # vnf/net name in the scenario
-      vnf_name:  sriov        # VNF name as introduced in OPENMANO DB
-    sriov3:                   # vnf/net name in the scenario
-      vnf_name:  sriov        # VNF name as introduced in OPENMANO DB
-    sriov4:                   # vnf/net name in the scenario
-      vnf_name:  sriov        # VNF name as introduced in OPENMANO DB
-
-  networks: 
-    mgmt:                   # provide a name for this net or connection
-      external:  true
-      interfaces: 
-      - sriov1:  eth0       # Node and its interface
-      - sriov2:  eth0       # Node and its interface
-      - sriov3:  eth0       # Node and its interface
-      - sriov4:  eth0       # Node and its interface
-    dataplane:                   # provide a name for this net or connection
-      interfaces: 
-      - sriov1:  xe0       # Node and its interface
-      - sriov2:  xe0       # Node and its interface
-      - sriov3:  xe0       # Node and its interface
-      - sriov4:  xe0       # Node and its interface
-
diff --git a/test/RO_tests/pmp_sriov/vnfd_1sriov.yaml b/test/RO_tests/pmp_sriov/vnfd_1sriov.yaml
deleted file mode 100644 (file)
index 2d4ad5b..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
----
-vnf:
-    name:        sriov
-    description: Machine with EPA and a SR-IOV interface
-    external-connections:
-    -   name:              eth0
-        type:              bridge
-        VNFC:              sriov-VM
-        local_iface_name:  eth0
-        description:       management interface
-    -   name:              xe0
-        type:              data
-        VNFC:              sriov-VM
-        local_iface_name:  xe0
-        description:       Dataplane interface
-    VNFC:
-    -   name:        sriov-VM
-        description: Machine with EPA and a SR-IOV interface
-        image name:  centos
-        disk: 20
-        numas: 
-        -   threads: 1          # "cores", "paired-threads", "threads"
-            memory: 1                 # GBytes
-            interfaces:
-            -   name:      xe0
-                vpci:      "0000:00:11.0"
-                dedicated: "no"         # "yes"(passthrough), "no"(sriov with vlan tags), "yes:sriov"(sriovi, but exclusive and without vlan tag)
-                bandwidth: 1 Gbps
-
-        bridge-ifaces:
-        -   name:      eth0
-            vpci:      "0000:00:0a.0"
-
diff --git a/test/RO_tests/pmp_sriov_passthrough/scenario_pmp_sriov_passthrough.yaml b/test/RO_tests/pmp_sriov_passthrough/scenario_pmp_sriov_passthrough.yaml
deleted file mode 100644 (file)
index 322c094..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
----
-schema_version:  2
-scenario:
-  name:          p2p_sriov_passthrough
-  description:   Network scenario consisting of two machines with a sr-iov interconnected between them
-  vnfs: 
-    sriov1:                   # vnf/net name in the scenario
-      vnf_name:  sriov        # VNF name as introduced in OPENMANO DB
-    passthrough1:                   # vnf/net name in the scenario
-      vnf_name:  passthrough        # VNF name as introduced in OPENMANO DB
-    sriov2:                   # vnf/net name in the scenario
-      vnf_name:  sriov        # VNF name as introduced in OPENMANO DB
-    passthrough2:                   # vnf/net name in the scenario
-      vnf_name:  passthrough        # VNF name as introduced in OPENMANO DB
-
-  networks: 
-    mgmt:                   # provide a name for this net or connection
-      external:  true
-      interfaces: 
-      - sriov1:  eth0       # Node and its interface
-      - passthrough1:  eth0       # Node and its interface
-      - sriov2:  eth0       # Node and its interface
-      - passthrough2:  eth0       # Node and its interface
-    dataplane:                   # provide a name for this net or connection
-      interfaces: 
-      - sriov1:  xe0       # Node and its interface
-      - passthrough1:  xe0       # Node and its interface
-      - sriov2:  xe0       # Node and its interface
-      - passthrough2:  xe0       # Node and its interface
-
diff --git a/test/RO_tests/pmp_sriov_passthrough/vnfd_1passthrough.yaml b/test/RO_tests/pmp_sriov_passthrough/vnfd_1passthrough.yaml
deleted file mode 100644 (file)
index ab24adf..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
----
-vnf:
-    name:        passthrough
-    description: Machine with EPA and a SR-IOV interface
-    external-connections:
-    -   name:              eth0
-        type:              bridge
-        VNFC:              passthrough-VM
-        local_iface_name:  eth0
-        description:       management interface
-    -   name:              xe0
-        type:              data
-        VNFC:              passthrough-VM
-        local_iface_name:  xe0
-        description:       Dataplane interface
-    VNFC:
-    -   name:        passthrough-VM
-        description: Machine with EPA and a SR-IOV interface
-        image name:  centos
-        disk: 20
-        numas: 
-        -   threads: 1          # "cores", "paired-threads", "threads"
-            memory: 1                 # GBytes
-            interfaces:
-            -   name:      xe0
-                vpci:      "0000:00:11.0"
-                dedicated: "yes"         # "yes"(passthrough), "no"(sriov with vlan tags), "yes:sriov"(sriovi, but exclusive and without vlan tag)
-                bandwidth: 1 Gbps
-
-        bridge-ifaces:
-        -   name:      eth0
-            vpci:      "0000:00:0a.0"
-
diff --git a/test/RO_tests/pmp_sriov_passthrough/vnfd_1sriov.yaml b/test/RO_tests/pmp_sriov_passthrough/vnfd_1sriov.yaml
deleted file mode 100644 (file)
index 2d4ad5b..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
----
-vnf:
-    name:        sriov
-    description: Machine with EPA and a SR-IOV interface
-    external-connections:
-    -   name:              eth0
-        type:              bridge
-        VNFC:              sriov-VM
-        local_iface_name:  eth0
-        description:       management interface
-    -   name:              xe0
-        type:              data
-        VNFC:              sriov-VM
-        local_iface_name:  xe0
-        description:       Dataplane interface
-    VNFC:
-    -   name:        sriov-VM
-        description: Machine with EPA and a SR-IOV interface
-        image name:  centos
-        disk: 20
-        numas: 
-        -   threads: 1          # "cores", "paired-threads", "threads"
-            memory: 1                 # GBytes
-            interfaces:
-            -   name:      xe0
-                vpci:      "0000:00:11.0"
-                dedicated: "no"         # "yes"(passthrough), "no"(sriov with vlan tags), "yes:sriov"(sriovi, but exclusive and without vlan tag)
-                bandwidth: 1 Gbps
-
-        bridge-ifaces:
-        -   name:      eth0
-            vpci:      "0000:00:0a.0"
-
diff --git a/test/RO_tests/simple_2_vnf/scenario_simple_2_vnf.yaml b/test/RO_tests/simple_2_vnf/scenario_simple_2_vnf.yaml
deleted file mode 100644 (file)
index e05d416..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
----
-schema_version:  2
-scenario:
-  name:          simple
-  description:   Simple network scenario consisting of two VNF connected to an external network
-  vnfs: 
-    linux1:                   # vnf/net name in the scenario
-      vnf_name:  linux        # VNF name as introduced in OPENMANO DB
-    linux2:                   # vnf/net name in the scenario
-      vnf_name:  linux        # VNF name as introduced in OPENMANO DB
-  networks: 
-    mgmt:                   # provide a name for this net or connection
-      external:  true
-      interfaces: 
-      - linux1:  eth0       # Node and its interface
-      - linux2:  eth0       # Node and its interface
-
diff --git a/test/RO_tests/simple_2_vnf/vnfd_linux.yaml b/test/RO_tests/simple_2_vnf/vnfd_linux.yaml
deleted file mode 100644 (file)
index 45c670f..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
----
-vnf:
-    name:        linux
-    description: Single-VM VNF with a traditional cloud VM based on generic Linux OS
-    external-connections:
-    -   name:              eth0
-        type:              bridge
-        VNFC:              linux-VM
-        local_iface_name:  eth0
-        description:       General purpose interface
-    VNFC:
-    -   name:        linux-VM
-        description: Generic Linux Virtual Machine
-        #Copy the image to a compute path and edit this path
-        image name:  image_name.qcow2
-        vcpus: 1          # Only for traditional cloud VMs. Number of virtual CPUs (oversubscription is allowed).
-        ram: 1024         # Only for traditional cloud VMs. Memory in MBytes (not from hugepages, oversubscription is allowed)
-        disk: 10
-        bridge-ifaces:
-        -   name:      eth0
-            vpci:      "0000:00:11.0"
-        numas: []
diff --git a/test/RO_tests/simple_cloud_init/scenario_simple-cloud-init.yaml b/test/RO_tests/simple_cloud_init/scenario_simple-cloud-init.yaml
deleted file mode 100644 (file)
index 77fc17e..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
----
-schema_version:  2
-scenario:
-  name:          simple-cloud-init
-  description:   Simple network scenario consisting of a single VNF connected to an external network
-  vnfs: 
-    linux1:                   # vnf/net name in the scenario
-      vnf_name:  linux-cloud-init       # VNF name as introduced in OPENMANO DB
-  networks: 
-    mgmt:                   # provide a name for this net or connection
-      external:  true
-      interfaces: 
-      - linux1:  eth0       # Node and its interface
-
diff --git a/test/RO_tests/simple_cloud_init/vnfd_linux-cloud-init.yaml b/test/RO_tests/simple_cloud_init/vnfd_linux-cloud-init.yaml
deleted file mode 100644 (file)
index aa415cf..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
----
-schema_version: "0.2"
-vnf:
-    name:        linux-cloud-init
-    description: Single-VM VNF with a traditional cloud VM based on generic Linux OS
-    external-connections:
-    -   name:              eth0
-        type:              mgmt
-        description:       General purpose interface
-        VNFC:              linux-VM
-        local_iface_name:  eth0
-    VNFC:
-    -   name:        linux-VM
-        description: Generic Linux Virtual Machine
-        #Copy the image to a compute path and edit this path
-        image name:  ubuntu16.04
-        vcpus: 1          # Only for traditional cloud VMs. Number of virtual CPUs (oversubscription is allowed).
-        ram:   2048         # Only for traditional cloud VMs. Memory in MBytes (not from hugepages, oversubscription is allowed)
-        disk:  20
-        bridge-ifaces:
-        -   name:      eth0
-            vpci:      "0000:00:11.0"
-        numas: []
-        boot-data: 
-            key-pairs: 
-            -  ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCy2w9GHMKKNkpCmrDK2ovc3XBYDETuLWwaW24S+feHhLBQiZlzh3gSQoINlA+2ycM9zYbxl4BGzEzpTVyCQFZv5PidG4m6ox7LR+KYkDcITMyjsVuQJKDvt6oZvRt6KbChcCi0n2JJD/oUiJbBFagDBlRslbaFI2mmqmhLlJ5TLDtmYxzBLpjuX4m4tv+pdmQVfg7DYHsoy0hllhjtcDlt1nn05WgWYRTu7mfQTWfVTavu+OjIX3e0WN6NW7yIBWZcE/Q9lC0II3W7PZDE3QaT55se4SPIO2JTdqsx6XGbekdG1n6adlduOI27sOU5m4doiyJ8554yVbuDB/z5lRBD alfonso.tiernosepulveda@telefonica.com
-            users:
-            -  name: atierno
-               key-pairs: 
-               -  ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCy2w9GHMKKNkpCmrDK2ovc3XBYDETuLWwaW24S+feHhLBQiZlzh3gSQoINlA+2ycM9zYbxl4BGzEzpTVyCQFZv5PidG4m6ox7LR+KYkDcITMyjsVuQJKDvt6oZvRt6KbChcCi0n2JJD/oUiJbBFagDBlRslbaFI2mmqmhLlJ5TLDtmYxzBLpjuX4m4tv+pdmQVfg7DYHsoy0hllhjtcDlt1nn05WgWYRTu7mfQTWfVTavu+OjIX3e0WN6NW7yIBWZcE/Q9lC0II3W7PZDE3QaT55se4SPIO2JTdqsx6XGbekdG1n6adlduOI27sOU5m4doiyJ8554yVbuDB/z5lRBD alfonso.tiernosepulveda@telefonica.com
-            boot-data-drive: true
-            config-files: 
-            -   content: |
-                       auto enp0s3
-                       iface enp0s3 inet dhcp
-                dest: /etc/network/interfaces.d/enp0s3.cfg
-                permissions: '0644'
-                owner: root:root
-            -   content: |
-                       #! /bin/bash
-                       ls -al >> /var/log/osm.log
-                dest: /etc/rc.local
-                permissions: '0755'
-            -   content: "file content"
-                dest: /etc/test_delete
-
diff --git a/test/RO_tests/simple_count3/scenario_linux_count3.yaml b/test/RO_tests/simple_count3/scenario_linux_count3.yaml
deleted file mode 100644 (file)
index 0a4116c..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
----
-schema_version:  2
-scenario:
-  name:          simple_count3
-  description:   Simple network scenario consisting of a multi VNFC VNF connected to an external network
-  vnfs: 
-    linux1:                   # vnf/net name in the scenario
-      vnf_name:  simple_linux_count3        # VNF name as introduced in OPENMANO DB
-  networks: 
-    mgmt:                   # provide a name for this net or connection
-      external:  true
-      interfaces: 
-      - linux1:  control0       # Node and its interface
-    internal1:                   # provide a name for this net or connection
-      external:  false
-      interfaces: 
-      - linux1:  data-eth1
-
-
diff --git a/test/RO_tests/simple_count3/vnfd_count3.yaml b/test/RO_tests/simple_count3/vnfd_count3.yaml
deleted file mode 100644 (file)
index 712d392..0000000
+++ /dev/null
@@ -1,68 +0,0 @@
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
----
-schema_version: "0.2"
-vnf:
-    name:        simple_linux_count3
-    description: "Example of a linux VNF consisting of two VMs with one internal network"
-    # class: parent      # Optional. Used to organize VNFs
-    internal-connections:
-    -   name:        internal-eth2
-        description: internalnet
-        type:        e-lan
-        implementation: overlay
-        ip-profile:
-            ip-version:       IPv4
-            subnet-address:   192.168.1.0/24
-            gateway-address:  192.168.1.1
-            dns-address:      8.8.8.8
-            dhcp:
-                enabled: true
-                start-address: 192.168.1.100
-                count: 100
-        elements:
-        -   VNFC:             linux_3VMs
-            local_iface_name: eth2
-            ip_address:       192.168.1.2
-    external-connections:
-    -   name:              control0
-        type:              mgmt
-        VNFC:              linux_3VMs
-        local_iface_name:  eth0
-        description:       control interface VM1
-    -   name:              data-eth1
-        type:              bridge
-        VNFC:              linux_3VMs
-        local_iface_name:  eth1
-        description:       data interface input
-    VNFC:
-    -   name:        linux_3VMs
-        count:       3
-        description: "Linux VM1 2 CPUs, 2 GB RAM and 3 bridge interfaces"
-        #Copy the image to a compute path and edit this path
-        image name:  TestVM
-        disk: 10
-        vcpus: 2
-        ram: 2048
-        bridge-ifaces:
-        -   name:      eth0
-        -   name:      eth1
-        -   name:      eth2
diff --git a/test/RO_tests/simple_linux/scenario_simple_linux.yaml b/test/RO_tests/simple_linux/scenario_simple_linux.yaml
deleted file mode 100644 (file)
index b6cff70..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
----
-nsd:nsd-catalog:
-    nsd:
-    -   id: simple
-        name: simple
-        vendor:      OSM
-        version:     '1.0'
-        description:   Simple network scenario consisting of a single VNF connected to an external network
-        constituent-vnfd:
-        # The member-vnf-index needs to be unique, starting from 1
-        # vnfd-id-ref is the id of the VNFD
-        # Multiple constituent VNFDs can be specified
-        -   member-vnf-index: 1
-            vnfd-id-ref: linux
-        vld:
-        # Networks for the VNFs
-        -   id: vld1
-            name: mgmt
-            short-name: vld1-sname
-            type: ELAN
-            mgmt-network: 'true'
-            vnfd-connection-point-ref:
-            -   member-vnf-index-ref: 1
-                vnfd-id-ref: linux
-                vnfd-connection-point-ref: eth0
-
diff --git a/test/RO_tests/simple_linux/vnfd_linux.yaml b/test/RO_tests/simple_linux/vnfd_linux.yaml
deleted file mode 100644 (file)
index a666124..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
----
-vnfd-catalog:
-    vnfd:
-     -  id: linux
-        name: linux
-        description: Single-VM VNF with a traditional cloud VM based on generic Linux OS
-        connection-point:
-        -   name: eth0
-            type: VPORT
-        vdu:
-        -   id: linux-VM
-            name: linux-VM
-            description: Generic Linux Virtual Machine
-            #Copy the image to a compute path and edit this path
-            image:  image_name.qcow2
-            vm-flavor:
-                  memory-mb: '1024'
-                  storage-gb: '10'
-                  vcpu-count: '1'
-            interface:
-            -   name: eth0
-                type: EXTERNAL
-                virtual-interface:
-                    type: VIRTIO
-                    vpci:      "0000:00:11.0"
-                external-connection-point-ref: eth0
diff --git a/test/RO_tests/simple_multi_vnfc/scenario_multi_vnfc.yaml b/test/RO_tests/simple_multi_vnfc/scenario_multi_vnfc.yaml
deleted file mode 100644 (file)
index 07b8902..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
----
-nsd:nsd-catalog:
-    nsd:
-    -   id: simple_multi_vnfc
-        name: simple_multi_vnfc
-        vendor:      OSM
-        version:     '1.0'
-        description:   Simple network scenario consisting of a multi VNFC VNF connected to an external network
-        constituent-vnfd:
-        # The member-vnf-index needs to be unique, starting from 1
-        # vnfd-id-ref is the id of the VNFD
-        # Multiple constituent VNFDs can be specified
-        -   member-vnf-index: 1
-            vnfd-id-ref: linux_2VMs_v02
-        vld:
-        # Networks for the VNFs
-        -   id: vld1
-            name: mgmt
-            short-name: vld1-sname
-            type: ELAN
-            mgmt-network: 'true'
-            vnfd-connection-point-ref:
-            -   member-vnf-index-ref: 1
-                vnfd-id-ref: linux_2VMs_v02
-                vnfd-connection-point-ref: eth0
-            -   member-vnf-index-ref: 1
-                vnfd-id-ref: linux_2VMs_v02
-                vnfd-connection-point-ref: xe1
-
diff --git a/test/RO_tests/simple_multi_vnfc/vnfd_linux_2VMs_v02.yaml b/test/RO_tests/simple_multi_vnfc/vnfd_linux_2VMs_v02.yaml
deleted file mode 100644 (file)
index 8d541c6..0000000
+++ /dev/null
@@ -1,124 +0,0 @@
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
----
-vnfd-catalog:
-    vnfd:
-     -  id: linux_2VMs_v02
-        name: linux_2VMs_v02
-        description: "Example of a linux VNF consisting of two VMs with one internal network"
-        connection-point:
-        -   id: eth0
-            name: eth0
-            short-name: eth0
-            type: VPORT
-        -   id: xe1
-            name: xe1
-            short-name: xe1
-            type: VPORT
-        internal-vld:
-        -   id: internalnet
-            name: internalnet
-            short-name: internalnet
-            ip-profile-ref: ip-prof1
-            type: ELAN
-            internal-connection-point:
-            -   id-ref: VM1-xe0
-            -   id-ref: VM2-xe0
-        ip-profiles:
-        -   name: ip-prof1
-            description: IP profile
-            gateway-address:  192.168.1.1
-            dns-address: 8.8.8.8
-            #-   address: 8.8.8.8
-            ip-profile-params:
-            ip-version: ipv4
-            subnet-address: 192.168.1.0/24
-            dhcp-params:
-                enabled: true
-                start-address: 192.168.1.100
-                count: 100
-        vdu:
-        -   id: linux_2VMs-VM1
-            name: linux_2VMs-VM1
-            description: Generic Linux Virtual Machine
-            #Copy the image to a compute path and edit this path
-            image:  TestVM
-            vm-flavor:
-                  memory-mb: '2048'
-                  storage-gb: '10'
-                  vcpu-count: '4'
-            interface:
-            -   name: eth0
-                type: EXTERNAL
-                virtual-interface:
-                    type: VIRTIO
-                    vpci:      "0000:00:09.0"
-                external-connection-point-ref: eth0
-            -   name: xe0
-                type: INTERNAL
-                virtual-interface:
-                    type: VIRTIO
-                    vpci:      "0000:00:11.0"
-                internal-connection-point-ref: VM1-xe0
-            -   name: xe1
-                type: EXTERNAL
-                virtual-interface:
-                    type: VIRTIO
-                    vpci:      "0000:00:12.0"
-                external-connection-point-ref: xe1
-            internal-connection-point:
-            - id: VM1-xe0
-              name: VM1-xe0
-              short-name: VM1-xe0
-              type: VPORT
-        -   id: linux_2VMs-VM2
-            name: linux_2VMs-VM2
-            description: Generic Linux Virtual Machine
-            #Copy the image to a compute path and edit this path
-            image:  TestVM
-            vm-flavor:
-                memory-mb: '2048'
-                storage-gb: '10'
-                vcpu-count: '4'
-            interface:
-            -   name: eth0
-                type: EXTERNAL
-                virtual-interface:
-                    type: VIRTIO
-                    vpci:      "0000:00:09.0"
-                external-connection-point-ref: eth0
-            -   name: xe0
-                type: INTERNAL
-                virtual-interface:
-                    type: VIRTIO
-                    vpci:      "0000:00:11.0"
-                internal-connection-point-ref: VM2-xe0
-            -   name: xe1
-                type: EXTERNAL
-                virtual-interface:
-                    type: VIRTIO
-                    vpci:      "0000:00:12.0"
-                external-connection-point-ref: xe1
-            internal-connection-point:
-            -   id: VM2-xe0
-                name: VM2-xe0
-                short-name: VM2-xe0
-                type: VPORT
diff --git a/test/RO_tests/sr_iov/scenario_p2p_sriov.yaml b/test/RO_tests/sr_iov/scenario_p2p_sriov.yaml
deleted file mode 100644 (file)
index a0b888c..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
----
-schema_version:  2
-scenario:
-  name:          p2p_sriov
-  description:   Network scenario consisting of two machines with a sr-iov interconnected between them
-  vnfs: 
-    sriov1:                   # vnf/net name in the scenario
-      vnf_name:  sriov        # VNF name as introduced in OPENMANO DB
-    sriov2:                   # vnf/net name in the scenario
-      vnf_name:  sriov        # VNF name as introduced in OPENMANO DB
-  networks: 
-    mgmt:                   # provide a name for this net or connection
-      external:  true
-      interfaces: 
-      - sriov1:  eth0       # Node and its interface
-      - sriov2:  eth0       # Node and its interface
-    dataplane:                   # provide a name for this net or connection
-      interfaces: 
-      - sriov1:  xe0       # Node and its interface
-      - sriov2:  xe0       # Node and its interface
-
diff --git a/test/RO_tests/sr_iov/vnfd_1sriov.yaml b/test/RO_tests/sr_iov/vnfd_1sriov.yaml
deleted file mode 100644 (file)
index 2d4ad5b..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
----
-vnf:
-    name:        sriov
-    description: Machine with EPA and a SR-IOV interface
-    external-connections:
-    -   name:              eth0
-        type:              bridge
-        VNFC:              sriov-VM
-        local_iface_name:  eth0
-        description:       management interface
-    -   name:              xe0
-        type:              data
-        VNFC:              sriov-VM
-        local_iface_name:  xe0
-        description:       Dataplane interface
-    VNFC:
-    -   name:        sriov-VM
-        description: Machine with EPA and a SR-IOV interface
-        image name:  centos
-        disk: 20
-        numas: 
-        -   threads: 1          # "cores", "paired-threads", "threads"
-            memory: 1                 # GBytes
-            interfaces:
-            -   name:      xe0
-                vpci:      "0000:00:11.0"
-                dedicated: "no"         # "yes"(passthrough), "no"(sriov with vlan tags), "yes:sriov"(sriovi, but exclusive and without vlan tag)
-                bandwidth: 1 Gbps
-
-        bridge-ifaces:
-        -   name:      eth0
-            vpci:      "0000:00:0a.0"
-
diff --git a/test/RO_tests/sriov_passthrough/scenario_p2p_sriov_passthrough.yaml b/test/RO_tests/sriov_passthrough/scenario_p2p_sriov_passthrough.yaml
deleted file mode 100644 (file)
index 29bd4c8..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
----
-schema_version:  2
-scenario:
-  name:          p2p_sriov_passthrough
-  description:   Network scenario consisting of two machines with a sr-iov interconnected between them
-  vnfs: 
-    sriov:                   # vnf/net name in the scenario
-      vnf_name:  sriov        # VNF name as introduced in OPENMANO DB
-    passthrough:                   # vnf/net name in the scenario
-      vnf_name:  passthrough        # VNF name as introduced in OPENMANO DB
-  networks: 
-    mgmt:                   # provide a name for this net or connection
-      external:  true
-      interfaces: 
-      - sriov:  eth0       # Node and its interface
-      - passthrough:  eth0       # Node and its interface
-    dataplane:                   # provide a name for this net or connection
-      interfaces: 
-      - sriov:  xe0       # Node and its interface
-      - passthrough:  xe0       # Node and its interface
-
diff --git a/test/RO_tests/sriov_passthrough/vnfd_1passthrough.yaml b/test/RO_tests/sriov_passthrough/vnfd_1passthrough.yaml
deleted file mode 100644 (file)
index ab24adf..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
----
-vnf:
-    name:        passthrough
-    description: Machine with EPA and a SR-IOV interface
-    external-connections:
-    -   name:              eth0
-        type:              bridge
-        VNFC:              passthrough-VM
-        local_iface_name:  eth0
-        description:       management interface
-    -   name:              xe0
-        type:              data
-        VNFC:              passthrough-VM
-        local_iface_name:  xe0
-        description:       Dataplane interface
-    VNFC:
-    -   name:        passthrough-VM
-        description: Machine with EPA and a SR-IOV interface
-        image name:  centos
-        disk: 20
-        numas: 
-        -   threads: 1          # "cores", "paired-threads", "threads"
-            memory: 1                 # GBytes
-            interfaces:
-            -   name:      xe0
-                vpci:      "0000:00:11.0"
-                dedicated: "yes"         # "yes"(passthrough), "no"(sriov with vlan tags), "yes:sriov"(sriovi, but exclusive and without vlan tag)
-                bandwidth: 1 Gbps
-
-        bridge-ifaces:
-        -   name:      eth0
-            vpci:      "0000:00:0a.0"
-
diff --git a/test/RO_tests/sriov_passthrough/vnfd_1sriov.yaml b/test/RO_tests/sriov_passthrough/vnfd_1sriov.yaml
deleted file mode 100644 (file)
index 2d4ad5b..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
----
-vnf:
-    name:        sriov
-    description: Machine with EPA and a SR-IOV interface
-    external-connections:
-    -   name:              eth0
-        type:              bridge
-        VNFC:              sriov-VM
-        local_iface_name:  eth0
-        description:       management interface
-    -   name:              xe0
-        type:              data
-        VNFC:              sriov-VM
-        local_iface_name:  xe0
-        description:       Dataplane interface
-    VNFC:
-    -   name:        sriov-VM
-        description: Machine with EPA and a SR-IOV interface
-        image name:  centos
-        disk: 20
-        numas: 
-        -   threads: 1          # "cores", "paired-threads", "threads"
-            memory: 1                 # GBytes
-            interfaces:
-            -   name:      xe0
-                vpci:      "0000:00:11.0"
-                dedicated: "no"         # "yes"(passthrough), "no"(sriov with vlan tags), "yes:sriov"(sriovi, but exclusive and without vlan tag)
-                bandwidth: 1 Gbps
-
-        bridge-ifaces:
-        -   name:      eth0
-            vpci:      "0000:00:0a.0"
-
diff --git a/test/RO_tests/v3_2vdu_set_ip_mac/scenario_2vdu_set_ip_mac.yaml b/test/RO_tests/v3_2vdu_set_ip_mac/scenario_2vdu_set_ip_mac.yaml
deleted file mode 100644 (file)
index fb76079..0000000
+++ /dev/null
@@ -1,84 +0,0 @@
-nsd:nsd-catalog:
-    nsd:
-    -   id:          test_2vdu_nsd
-        name:        test_2vdu_nsd_name
-        short-name:  test_2vdu_nsd_sname
-        description: 2 vnfs, eatch one with 3 cirros vdu
-        vendor:      OSM
-        version:     '1.0'
-
-        # Place the logo as png in icons directory and provide the name here
-        logo:        osm_2x.png
-
-        # Specify the VNFDs that are part of this NSD
-        constituent-vnfd:
-            # The member-vnf-index needs to be unique, starting from 1
-            # vnfd-id-ref is the id of the VNFD
-            # Multiple constituent VNFDs can be specified
-        -   member-vnf-index: 1
-            vnfd-id-ref: test_2vdu
-        -   member-vnf-index: 2
-            vnfd-id-ref: test_2vdu2
-
-        ip-profiles:
-        -   description: Inter VNF Link
-            ip-profile-params:
-                gateway-address: 10.31.31.254
-                ip-version:      ipv4
-                subnet-address:  10.31.31.0/24
-                dns-server:
-                -   address: 8.8.8.8
-                -   address: 8.8.8.9 
-                dhcp-params:
-                  count: 200
-                  start-address: 10.31.31.20
-            name: ipprofileA
-        -   description: IP profile that disables dhcp server
-            ip-profile-params:
-                dhcp-params:
-                    enabled: 'false'
-                ip-version: ipv4
-            name: no_dhcp
-
-        vld:
-        # Networks for the VNFs
-        -   id:         vld1
-            name:       mgmt
-            short-name: vld1-sname
-            type:       ELAN
-            mgmt-network: 'true'
-            vnfd-connection-point-ref:
-            -   member-vnf-index-ref: 1
-                vnfd-id-ref: test_2vdu
-                vnfd-connection-point-ref: eth0
-            -   member-vnf-index-ref: 2
-                vnfd-id-ref: test_2vdu2
-                vnfd-connection-point-ref: eth0
-
-        -   id:         vld2
-            name:       nsd-vld2
-            short-name: vld2-sname
-            type:       ELAN
-            ip-profile-ref: ipprofileA
-            vnfd-connection-point-ref:
-            -   member-vnf-index-ref:      1
-                vnfd-id-ref:               test_2vdu
-                vnfd-connection-point-ref: eth1
-                ip-address:                10.31.31.4
-            -   member-vnf-index-ref:      2
-                vnfd-id-ref:               test_2vdu2
-                vnfd-connection-point-ref: eth1
-                ip-address:                10.31.31.5
-
-        -   id:         vld3
-            name:       nsd-vld3
-            short-name: vld3-sname
-            type:       ELAN
-            ip-profile-ref: no_dhcp
-            vnfd-connection-point-ref:
-            -   member-vnf-index-ref:      1
-                vnfd-id-ref:               test_2vdu
-                vnfd-connection-point-ref: eth4
-            -   member-vnf-index-ref:      2
-                vnfd-id-ref:               test_2vdu2
-                vnfd-connection-point-ref: eth4
diff --git a/test/RO_tests/v3_2vdu_set_ip_mac/vnfd_2vdu_set_ip_mac.yaml b/test/RO_tests/v3_2vdu_set_ip_mac/vnfd_2vdu_set_ip_mac.yaml
deleted file mode 100644 (file)
index e790a9c..0000000
+++ /dev/null
@@ -1,93 +0,0 @@
-vnfd-catalog:
-    vnfd:
-    -   connection-point:
-        -   name: eth0
-            type: VPORT
-        -   name: eth1
-            type: VPORT
-        -   name: eth4
-            type: VPORT
-        description: VNF with internal VLD and set IP and mac
-        id: test_2vdu
-        name: test_2vdu_name
-        short-name: test_2vdu_sname
-        mgmt-interface:
-            cp: eth0
-        internal-vld:
-        -   description: Internal VL
-            id:          net_internal
-            name:        internal_vld1
-            short-name:  net_internal_sname
-            type:        ELAN
-            internal-connection-point:
-            -   id-ref:     eth2
-                ip-address: 10.10.135.4
-            -   id-ref:     eth3
-                ip-address: 10.10.135.5
-            ip-profile-ref: ip-profile1
-        ip-profiles:
-        -   description: Inter VNF Link
-            ip-profile-params:
-                gateway-address: null
-                ip-version:      ipv4
-                subnet-address:  10.10.135.0/24
-                dhcp-params:
-                  count:         100
-                  start-address: 10.10.135.20
-            name: ip-profile1
-        vdu:
-        -   id: VM1
-            name: VM11
-            image: US1604
-            interface:
-            -   name: iface11
-                type: EXTERNAL
-                virtual-interface:
-                    type: VIRTIO
-                external-connection-point-ref: eth0
-                mac-address:   "52:33:44:55:66:77"
-            -   name: iface12
-                type: INTERNAL
-                virtual-interface:
-                    type: VIRTIO
-                internal-connection-point-ref: eth2
-                mac-address:   "52:33:44:55:66:78"
-            -   name: iface13
-                type: EXTERNAL
-                virtual-interface:
-                    type: VIRTIO
-                external-connection-point-ref: eth4
-            internal-connection-point:
-            -   name: eth2-icp
-                id:   eth2
-                type: VPORT
-            vm-flavor:
-                memory-mb: '2048'
-                storage-gb: '8'
-                vcpu-count: '1'
-        -   id: VM2
-            image: US1604
-            name: VM12
-            interface:
-            -   name: iface21
-                type: EXTERNAL
-                virtual-interface:
-                    type: VIRTIO
-                external-connection-point-ref: eth1
-                mac-address:   52:33:44:55:66:79
-            -   name: iface22
-                type: INTERNAL
-                virtual-interface:
-                    type: VIRTIO
-                internal-connection-point-ref: eth3
-                mac-address:   52:33:44:55:66:80
-            internal-connection-point:
-            -   name: eth3-icp
-                id:   eth3
-                type: VPORT
-            vm-flavor:
-                memory-mb: '2048'
-                storage-gb: '8'
-                vcpu-count: '1'
-        vendor: ROtest
-        version: '1.0'
diff --git a/test/RO_tests/v3_2vdu_set_ip_mac/vnfd_2vdu_set_ip_mac2.yaml b/test/RO_tests/v3_2vdu_set_ip_mac/vnfd_2vdu_set_ip_mac2.yaml
deleted file mode 100644 (file)
index 6c4b6cf..0000000
+++ /dev/null
@@ -1,93 +0,0 @@
-vnfd-catalog:
-    vnfd:
-    -   connection-point:
-        -   name: eth0
-            type: VPORT
-        -   name: eth1
-            type: VPORT
-        -   name: eth4
-            type: VPORT
-        description: VNF with internal VLD and set IP and mac
-        id: test_2vdu2
-        name: test_2vdu2_name
-        short-name: test_2vdu2_sname
-        mgmt-interface:
-            cp: eth0
-        internal-vld:
-        -   description: Internal VL
-            id:          net_internal
-            name:        internal_vld2
-            short-name:  net_internal_sname
-            type:        ELAN
-            internal-connection-point:
-            -   id-ref:     eth2
-                ip-address: 10.10.133.4
-            -   id-ref:     eth3
-                ip-address: 10.10.133.5
-            ip-profile-ref: ip-profile1
-        ip-profiles:
-        -   description: Inter VNF Link
-            ip-profile-params:
-                gateway-address: 10.10.133.1
-                ip-version:      ipv4
-                subnet-address:  10.10.133.0/24
-                dhcp-params:
-                  count:         200
-                  start-address: 10.10.133.20
-            name: ip-profile1
-        vdu:
-        -   id: VM1
-            name: VM21
-            image: US1604
-            interface:
-            -   name: iface11
-                type: EXTERNAL
-                virtual-interface:
-                    type: VIRTIO
-                external-connection-point-ref: eth0
-                mac-address:   "52:33:44:55:66:81"
-            -   name: iface12
-                type: INTERNAL
-                virtual-interface:
-                    type: VIRTIO
-                internal-connection-point-ref: eth2
-                mac-address:   "52:33:44:55:66:82"
-            -   name: iface13
-                type: EXTERNAL
-                virtual-interface:
-                    type: VIRTIO
-                external-connection-point-ref: eth4
-            internal-connection-point:
-            -   name: eth2-icp
-                id:   eth2
-                type: VPORT
-            vm-flavor:
-                memory-mb: '2048'
-                storage-gb: '8'
-                vcpu-count: '1'
-        -   id: VM2
-            image: US1604
-            name: VM22
-            interface:
-            -   name: iface21
-                type: EXTERNAL
-                virtual-interface:
-                    type: VIRTIO
-                external-connection-point-ref: eth1
-                mac-address:   52:33:44:55:66:83
-            -   name: iface22
-                type: INTERNAL
-                virtual-interface:
-                    type: VIRTIO
-                internal-connection-point-ref: eth3
-                mac-address:   52:33:44:55:66:84
-            internal-connection-point:
-            -   name: eth3-icp
-                id:   eth3
-                type: VPORT
-            vm-flavor:
-                memory-mb: '2048'
-                storage-gb: '8'
-                vcpu-count: '1'
-        vendor: ROtest
-        version: '1.0'
diff --git a/test/basictest.sh b/test/basictest.sh
deleted file mode 100755 (executable)
index 8f5a225..0000000
+++ /dev/null
@@ -1,310 +0,0 @@
-#!/bin/bash
-
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-#This script can be used as a basic test of openmano.
-#WARNING: It destroy the database content
-
-
-function usage(){
-    echo -e "usage: ${BASH_SOURCE[0]} [OPTIONS] <action>\n  test openmano using openvim as a VIM"
-    echo -e "           the OPENVIM_HOST, OPENVIM_PORT shell variables indicate openvim location"
-    echo -e "           by default localhost:9080"
-    echo -e "  <action> is a list of the following items (by default 'reset add-openvim create delete del-openvim')"
-    echo -e "    reset       resets the openmano database content and creates osm tenant"
-    echo -e "    add-openvim adds and attaches a local openvim datacenter"
-    echo -e "    del-openvim detaches and deletes the local openvim datacenter"
-    echo -e "    create      creates VNFs, scenarios and instances"
-    echo -e "    delete      deletes the created instances, scenarios and VNFs"
-    echo -e "    delete-all  deletes ALL the existing instances, scenarios and vnf at the current tenant"
-    echo -e "  OPTIONS:"
-    echo -e "    -f --force       does not prompt for confirmation"
-    echo -e "    -h --help        shows this help"
-    echo -e "    --screen         forces to run openmano (and openvim) service in a screen"
-    echo -e "    --insert-bashrc  insert the created tenant,datacenter variables at"
-    echo -e "                     ~/.bashrc to be available by openmano CLI"
-    echo -e "    --install-openvim   installs openvim in test mode"
-    echo -e "    --init-openvim --initopenvim    if openvim runs locally, initopenvim is called to clean openvim"\
-            "database, create osm tenant and add fake hosts"
-}
-
-function is_valid_uuid(){
-    echo "$1" | grep -q -E '^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$' && return 0
-    return 1
-}
-
-#detect if is called with a source to use the 'exit'/'return' command for exiting
-DIRNAME=$(dirname $(readlink -f ${BASH_SOURCE[0]}))
-DIRmano=$(dirname $DIRNAME)
-DIRscript=${DIRmano}/scripts
-
-#detect paths of executables, preceding the relative paths
-openmano=openmano && [[ -x "${DIRmano}/openmano" ]] && openmano="${DIRmano}/openmano"
-service_openmano=service-openmano && [[ -x "$DIRscript/service-openmano" ]] &&
-    service_openmano="$DIRscript/service-openmano"
-initopenvim="initopenvim"
-openvim="openvim"
-
-[[ ${BASH_SOURCE[0]} != $0 ]] && _exit="return" || _exit="exit"
-
-
-#process options
-source ${DIRscript}/get-options.sh "force:f help:h insert-bashrc init-openvim:initopenvim install-openvim screen" \
-                $* || $_exit 1
-
-#help
-[ -n "$option_help" ] && usage && $_exit 0
-
-#check correct arguments
-force_param="" && [[ -n "$option_force" ]] && force_param=" -f"
-insert_bashrc_param="" && [[ -n "$option_insert_bashrc" ]] && insert_bashrc_param=" --insert-bashrc"
-screen_mano_param="" && [[ -n "$option_screen" ]] && screen_mano_param=" --screen-name=mano" 
-screen_vim_param=""  && [[ -n "$option_screen" ]] && screen_vim_param=" --screen-name=vim" 
-
-action_list=""
-
-for argument in $params
-do
-    if [[ $argument == reset ]] || [[ $argument == create ]] || [[ $argument == delete ]] ||
-       [[ $argument == add-openvim ]] || [[ $argument == del-openvim ]] ||  [[ $argument == delete-all ]] ||
-       [[ -z "$argument" ]]
-    then
-        action_list="$action_list $argument"
-        continue
-    fi
-    echo "invalid argument '$argument'?  Type -h for help" >&2 && $_exit 1
-done
-
-export OPENMANO_HOST=localhost
-export OPENMANO_PORT=9090
-[[ -n "$option_insert_bashrc" ]] && echo -e "\nexport OPENMANO_HOST=localhost"  >> ~/.bashrc
-[[ -n "$option_insert_bashrc" ]] && echo -e "\nexport OPENMANO_PORT=9090"  >> ~/.bashrc
-
-
-#by default action should be reset and create
-[[ -z $action_list ]]  && action_list="reset add-openvim create delete del-openvim"
-
-if [[ -n "$option_install_openvim" ]] 
-then
-    echo
-    echo "action: install openvim"
-    echo "################################"
-    mkdir -p ${DIRNAME}/local
-    pushd ${DIRNAME}/local
-    echo "installing openvim at  ${DIRNAME}/openvim ... "
-    wget -O install-openvim.sh "https://osm.etsi.org/gitweb/?p=osm/openvim.git;a=blob_plain;f=scripts/install-openvim.sh"
-    chmod +x install-openvim.sh
-    sudo ./install-openvim.sh --no-install-packages --force --quiet --develop
-    openvim="${DIRNAME}/local/openvim/openvim"
-    #force inito-penvim
-    option_init_openvim="-"
-    initopenvim="${DIRNAME}/local/openvim/scripts/initopenvim"
-    popd
-fi
-
-if [[ -n "$option_init_openvim" ]]
-then
-    echo
-    echo "action: init openvim"
-    echo "################################"
-    ${initopenvim} ${force_param}${insert_bashrc_param}${screen_vim_param} || \
-        echo "WARNING openvim cannot be initialized. The rest of test can fail!"
-fi
-
-#check openvim client variables are set
-#fail=""
-#[[ -z $OPENVIM_HOST ]] && echo "OPENVIM_HOST variable not defined" >&2 && fail=1
-#[[ -z $OPENVIM_PORT ]] && echo "OPENVIM_PORT variable not defined" >&2 && fail=1
-#[[ -n $fail ]] && $_exit 1
-
-
-for action in $action_list
-do
-    echo
-    echo "action: $action"
-    echo "################################"
-#if [[ $action == "install-openvim" ]]
-    #echo "Installing and starting openvim"
-    #mkdir -p temp
-    #pushd temp
-    #wget https://github.com/nfvlabs/openvim/raw/v0.4/scripts/install-openvim.sh
-    #chmod -x install-openvim.sh
-#fi
-
-if [[ $action == "reset" ]]
-then
-
-    #ask for confirmation if argument is not -f --force
-    force_=y
-    [[ -z "$option_force" ]] && read -e -p "WARNING: reset openmano database, content will be lost!!! Continue(y/N) " force_
-    [[ $force_ != y ]] && [[ $force_ != yes ]] && echo "aborted!" && $_exit
-
-    echo "Stopping openmano"
-    $service_openmano mano stop${screen_mano_param}
-    echo "Initializing openmano database"
-    $DIRmano/database_utils/init_mano_db.sh -u mano -p manopw
-    echo "Starting openmano"
-    $service_openmano mano start${screen_mano_param}
-    echo
-    printf "%-50s" "Creating openmano tenant 'osm': "
-    result=`$openmano tenant-create osm --description="created by basictest.sh"`
-    nfvotenant=`echo $result |gawk '{print $1}'`
-    #check a valid uuid is obtained
-    ! is_valid_uuid $nfvotenant && echo "FAIL" && echo "    $result" && $_exit 1
-    export OPENMANO_TENANT=osm
-    [[ -n "$option_insert_bashrc" ]] && echo -e "\nexport OPENMANO_TENANT=osm"  >> ~/.bashrc
-    echo $nfvotenant
-
-elif [[ $action == "delete" ]]
-then
-    result=`openmano tenant-list osm`
-    nfvotenant=`echo $result |gawk '{print $1}'`
-    #check a valid uuid is obtained
-    is_valid_uuid $nfvotenant || ! echo "Tenant osm not found. Already delete?" >&2 || $_exit 1
-    export OPENMANO_TENANT=$nfvotenant
-    $openmano instance-scenario-delete -f simple-instance     || echo "fail"
-    $openmano instance-scenario-delete -f complex-instance    || echo "fail"
-    $openmano instance-scenario-delete -f complex2-instance   || echo "fail"
-    $openmano instance-scenario-delete -f complex3-instance   || echo "fail"
-    $openmano instance-scenario-delete -f complex4-instance   || echo "fail"
-    $openmano instance-scenario-delete -f complex5-instance   || echo "fail"
-    $openmano instance-scenario-delete -f 3vdu_2vnf_nsd-instance       || echo "fail"
-    $openmano scenario-delete -f simple           || echo "fail"
-    $openmano scenario-delete -f complex          || echo "fail"
-    $openmano scenario-delete -f complex2         || echo "fail"
-    $openmano scenario-delete -f complex3         || echo "fail"
-    $openmano scenario-delete -f complex4         || echo "fail"
-    $openmano scenario-delete -f complex5         || echo "fail"
-    $openmano scenario-delete -f osm_id=3vdu_2vnf_nsd  || echo "fail"
-    $openmano vnf-delete -f linux                 || echo "fail"
-    $openmano vnf-delete -f linux_2VMs_v02        || echo "fail"
-    $openmano vnf-delete -f dataplaneVNF_2VMs     || echo "fail"
-    $openmano vnf-delete -f dataplaneVNF_2VMs_v02 || echo "fail"
-    $openmano vnf-delete -f dataplaneVNF1         || echo "fail"
-    $openmano vnf-delete -f dataplaneVNF2         || echo "fail"
-    $openmano vnf-delete -f dataplaneVNF3         || echo "fail"
-    $openmano vnf-delete -f dataplaneVNF4         || echo "fail"
-    $openmano vnf-delete -f osm_id=3vdu_vnfd      || echo "fail"
-
-elif [[ $action == "delete-all" ]]
-then
-    for i in instance-scenario scenario vnf
-    do
-        for f in `$openmano $i-list | awk '{print $1}'`
-        do
-            [[ -n "$f" ]] && [[ "$f" != No ]] && $openmano ${i}-delete -f ${f}
-        done
-    done
-
-elif [[ $action == "del-openvim" ]]
-then
-    $openmano datacenter-detach local-openvim           || echo "fail"
-    $openmano datacenter-delete -f local-openvim        || echo "fail"
-
-elif [[ $action == "add-openvim" ]]
-then
-
-    printf "%-50s" "Creating datacenter 'local-openvim' at openmano:"
-    [[ -z $OPENVIM_HOST ]] && OPENVIM_HOST=localhost
-    [[ -z $OPENVIM_PORT ]] && OPENVIM_PORT=9080
-    URL_ADMIN_PARAM=""
-    [[ -n $OPENVIM_ADMIN_PORT ]] && URL_ADMIN_PARAM=" --url_admin=http://${OPENVIM_HOST}:${OPENVIM_ADMIN_PORT}/openvim"
-    result=`$openmano datacenter-create local-openvim "http://${OPENVIM_HOST}:${OPENVIM_PORT}/openvim" \
-            --type=openvim${URL_ADMIN_PARAM} --config="{test: no use just for test}"`
-    datacenter=`echo $result |gawk '{print $1}'`
-    #check a valid uuid is obtained
-    ! is_valid_uuid $datacenter && echo "FAIL" && echo "    $result" && $_exit 1
-    echo $datacenter
-    export OPENMANO_DATACENTER=local-openvim
-    [[ -n "$option_insert_bashrc" ]] && echo -e "\nexport OPENMANO_DATACENTER=local-openvim"  >> ~/.bashrc
-
-    printf "%-50s" "Attaching openmano tenant to the datacenter:"
-    result=`$openmano datacenter-attach local-openvim --vim-tenant-name=osm --config="{test: no use just for test}"`
-    [[ $? != 0 ]] && echo  "FAIL" && echo "    $result" && $_exit 1
-    echo OK
-
-    printf "%-50s" "Updating external nets in openmano: "
-    result=`$openmano datacenter-netmap-delete -f --all`
-    [[ $? != 0 ]] && echo  "FAIL" && echo "    $result"  && $_exit 1
-    result=`$openmano datacenter-netmap-import -f`
-    [[ $? != 0 ]] && echo  "FAIL" && echo "    $result"  && $_exit 1
-    echo OK
-    result=`$openmano datacenter-netmap-create --name=default --vim-name=mgmt`
-    [[ $? != 0 ]] && echo  "FAIL" && echo "    $result"  && $_exit 1
-    echo OK
-
-elif [[ $action == "create" ]]
-then
-    for VNF in linux dataplaneVNF1 dataplaneVNF2 dataplaneVNF_2VMs dataplaneVNF_2VMs_v02 dataplaneVNF3 linux_2VMs_v02 dataplaneVNF4
-    do    
-        printf "%-50s" "Creating VNF '${VNF}': "
-        result=`$openmano vnf-create $DIRmano/vnfs/examples/${VNF}.yaml`
-        vnf=`echo $result |gawk '{print $1}'`
-        #check a valid uuid is obtained
-        ! is_valid_uuid $vnf && echo FAIL && echo "    $result" &&  $_exit 1
-        echo $vnf
-    done
-
-    printf "%-50s" "Creating VNF '${VNF}': "
-    result=`$openmano vnf-create $DIRmano/vnfs/examples/v3_3vdu_vnfd.yaml --image-name=cirros034`
-    vnf=`echo $result |gawk '{print $1}'`
-    #check a valid uuid is obtained
-    ! is_valid_uuid $vnf && echo FAIL && echo "    $result" &&  $_exit 1
-    echo $vnf
-
-    for NS in simple complex complex2 complex3 complex4 complex5 v3_3vdu_2vnf_nsd
-    do
-        printf "%-50s" "Creating scenario '${NS}':"
-        result=`$openmano scenario-create $DIRmano/scenarios/examples/${NS}.yaml`
-        scenario=`echo $result |gawk '{print $1}'`
-        ! is_valid_uuid $scenario && echo FAIL && echo "    $result" &&  $_exit 1
-        echo $scenario
-    done
-
-    for IS in simple complex complex2 complex3 complex5 osm_id=3vdu_2vnf_nsd
-    do
-        printf "%-50s" "Creating instance-scenario '${IS}':"
-        result=`$openmano instance-scenario-create  --scenario ${IS} --name ${IS#osm_id=}-instance`
-        instance=`echo $result |gawk '{print $1}'`
-        ! is_valid_uuid $instance && echo FAIL && echo "    $result" &&  $_exit 1
-        echo $instance
-    done
-
-    printf "%-50s" "Creating instance-scenario 'complex4':"
-    result=`$openmano instance-scenario-create $DIRmano/instance-scenarios/examples/instance-creation-complex4.yaml`
-    instance=`echo $result |gawk '{print $1}'`
-    ! is_valid_uuid $instance && echo FAIL && echo "    $result" &&  $_exit 1
-    echo $instance
-
-    echo
-    #echo "Check virtual machines are deployed"
-    #vms_error=`openvim vm-list | grep ERROR | wc -l`
-    #vms=`openvim vm-list | wc -l`
-    #[[ $vms -ne 8 ]]       &&  echo "WARNING: $vms VMs created, must be 8 VMs" >&2 && $_exit 1
-    #[[ $vms_error -gt 0 ]] &&  echo "WARNING: $vms_error VMs with ERROR" >&2       && $_exit 1
-fi
-done
-
-echo
-echo DONE
-
-
diff --git a/test/test-multivim.sh b/test/test-multivim.sh
deleted file mode 100755 (executable)
index 077717d..0000000
+++ /dev/null
@@ -1,195 +0,0 @@
-#!/bin/bash
-
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-#This script is a basic test for openmano, that deals with two openvim
-#stopping on an error
-#WARNING: It destroy the database content
-
-
-function usage(){
-    echo -e "usage: ${BASH_SOURCE[0]} [-f]\n  Deletes openvim/openmano content and make automatically the wiki steps"
-    echo -e "  at 'https://github.com/nfvlabs/openmano/wiki/Getting-started#how-to-use-it'"
-    echo -e "  OPTIONS:"
-    echo -e "    -f --force : does not prompt for confirmation"
-    echo -e "    -h --help  : shows this help"
-}
-
-function is_valid_uuid(){
-    echo "$1" | grep -q -E '^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$' && return 0
-    return 1
-}
-
-
-#detect if is called with a source to use the 'exit'/'return' command for exiting
-[[ ${BASH_SOURCE[0]} != $0 ]] && _exit="return" || _exit="exit"
-
-#check correct arguments
-[[ -n $1 ]] && [[ $1 != -h ]] && [[ $1 != --help ]] && [[ $1 != -f ]] && [[ $1 != --force ]] && \
-   echo "invalid argument '$1'?" &&  usage >&2 && $_exit 1
-[[ $1 == -h ]] || [[ $1 == --help ]]  && usage && $_exit 0
-
-#ask for confirmation if argument is not -f --force
-force=""
-[[ $1 == -f ]] || [[ $1 == --force ]] && force=y
-[[ $force != y ]] && read -e -p "WARNING: openmano and openvim database content will be lost!!!  Continue(y/N)" force
-[[ $force != y ]] && [[ $force != yes ]] && echo "aborted!" && $_exit
-
-DIRNAME=$(dirname $(readlink -f ${BASH_SOURCE[0]}))
-DIR_BASE=$(dirname $DIRNAME)
-DIR_BASE=$(dirname $DIR_BASE)
-DIRvim=$DIR_BASE/openvim
-DIRmano=$DIR_BASE/openmano
-DIRscripts=$DIR_BASE/scripts
-
-echo "deleting deployed vm"
-openvim vm-delete -f | grep -q deleted && sleep 10 #give some time to get virtual machines deleted
-
-echo "Stopping openmano"
-$DIRscripts/service-openmano stop
-
-echo "Initializing databases"
-$DIRvim/database_utils/init_vim_db.sh -u vim -p vimpw
-$DIRmano/database_utils/init_mano_db.sh -u mano -p manopw
-
-echo "Starting openmano"
-$DIRscripts/service-openmano start
-
-echo "Creating openmano tenant 'mytenant'"
-nfvotenant=`openmano tenant-create mytenant --description=mytenant |gawk '{print $1}'`
-#check a valid uuid is obtained
-is_valid_uuid $nfvotenant || ! echo "fail" >&2 || $_exit 1 
-export OPENMANO_TENANT=$nfvotenant
-echo "  $nfvotenant"
-
-echo "Adding example hosts"
-openvim host-add $DIRvim/test/hosts/host-example0.json || ! echo "fail" >&2 || $_exit 1
-openvim host-add $DIRvim/test/hosts/host-example1.json || ! echo "fail" >&2 || $_exit 1
-openvim host-add $DIRvim/test/hosts/host-example2.json || ! echo "fail" >&2 || $_exit 1
-openvim host-add $DIRvim/test/hosts/host-example3.json || ! echo "fail" >&2 || $_exit 1
-echo "Adding example nets"
-openvim net-create $DIRvim/test/networks/net-example0.yaml || ! echo "fail" >&2 || $_exit 1
-openvim net-create $DIRvim/test/networks/net-example1.yaml || ! echo "fail" >&2 || $_exit 1
-openvim net-create $DIRvim/test/networks/net-example2.yaml || ! echo "fail" >&2 || $_exit 1
-openvim net-create $DIRvim/test/networks/net-example3.yaml || ! echo "fail" >&2 || $_exit 1
-
-echo "Creating openvim tenant 'admin'"
-vimtenant=`openvim tenant-create '{"tenant": {"name":"admin", "description":"admin"}}' |gawk '{print $1}'`
-#check a valid uuid is obtained
-is_valid_uuid $vimtenant || ! echo "fail" >&2 || $_exit 1
-echo "  $vimtenant"
-OPENVIM_TENANT_1=$vimtenant && export OPENVIM_TENANT=$vimtenant
-
-echo "Creating datacenter 'mydc1' in openmano"
-datacenter=`openmano datacenter-create mydc1 http://localhost:9080/openvim |gawk '{print $1}'`
-#check a valid uuid is obtained
-is_valid_uuid $datacenter || ! echo "fail" >&2 || $_exit 1 
-echo "  $datacenter"
-OPENMANO_DATACENTER_1=$datacenter && export OPENMANO_DATACENTER=$datacenter
-
-echo "Attaching openmano tenant to the datacenter and the openvim tenant"
-openmano datacenter-attach mydc1 --vim-tenant-id $vimtenant || ! echo "fail" >&2 || $_exit 1 
-
-echo "Updating external nets in openmano"
-openmano datacenter-net-update -f mydc1 || ! echo "fail" >&2 || $_exit 1
-
-echo "Creating a second fake datacenter 'mydc2' in openmano"
-datacenter2=`openmano datacenter-create mydc2 http://localhost:9082/openvim |gawk '{print $1}'`
-#check a valid uuid is obtained
-is_valid_uuid $datacenter || ! echo "fail" >&2 || $_exit 1 
-echo "  $datacenter2"
-OPENMANO_DATACENTER_2=$datacenter2
-echo "Attaching a second fake openvim 'mydc2'"
-openmano datacenter-attach mydc2 --vim-tenant-id $vimtenant || ! echo "fail" >&2 || $_exit 1
-
-echo "Creating VNFs, must fail in second openvim"
-openmano vnf-create $DIRmano/vnfs/examples/linux.yaml         || ! echo "fail" >&2 || $_exit 1
-openmano vnf-create $DIRmano/vnfs/examples/dataplaneVNF1.yaml || ! echo "fail" >&2 || $_exit 1
-openmano vnf-create $DIRmano/vnfs/examples/dataplaneVNF2.yaml || ! echo "fail" >&2 || $_exit 1
-
-echo "Checking images and flavors created at openvim"
-nb=`openvim image-list | wc -l`
-echo -n " $nb images "
-[[ $nb -eq 3 ]] || ! echo "fail" >&2 || $_exit 1
-echo " $nb flavors "
-[[ $nb -eq 3 ]] || ! echo "fail" >&2 || $_exit 1
-
-echo "Creating Scenarios"
-openmano scenario-create $DIRmano/scenarios/examples/simple.yaml  || ! echo "fail" >&2 || $_exit 1
-openmano scenario-create $DIRmano/scenarios/examples/complex.yaml || ! echo "fail" >&2 || $_exit 1
-
-echo "Deleting openvim images and flavors to force reload again"
-openvim image-delete -f
-openvim flavor-delete -f
-
-echo "Launching scenarios"
-openmano scenario-deploy simple simple-instance   || ! echo "fail" >&2 || $_exit 1
-openmano scenario-deploy complex complex-instance || ! echo "fail" >&2 || $_exit 1
-
-echo "Checking that openvim has 5 VM running"
-nb=`openvim vm-list | wc -l`
-[[ $nb -eq 5 ]] || ! echo "fail" >&2 || $_exit 1
-while openvim vm-list | grep -q CREATING ; do sleep 1; done
-openvim vm-list | grep -v -q ERROR || ! echo "fail: VM with error" >&2 || $_exit 1
-
-echo "Removing scenarios"
-for scenario in `openmano instance-scenario-list  | awk '{print $2}'`
-do
-  openmano instance-scenario-delete -f $scenario
-done
-
-echo "Editing datacenters so that Changing openvim Working with the second openvim"
-openmano datacenter-edit -f mydc1 'vim_url: http://localhost:9083/openvim'
-openmano datacenter-edit -f mydc2 'vim_url: http://localhost:9080/openvim'
-export OPENMANO_DATACENTER=$OPENMANO_DATACENTER_2
-
-echo "Updating external nets in openmano for second datacenter"
-openmano datacenter-net-update -f mydc2 || ! echo "fail" >&2 || $_exit 1
-
-echo "Launching Scenario instances"
-openmano scenario-deploy simple simple-instance   || ! echo "fail" >&2 || $_exit 1
-openmano scenario-deploy complex complex-instance || ! echo "fail" >&2 || $_exit 1
-
-echo "Checking images and flavors created at openvim"
-nb=`openvim image-list | wc -l`
-echo -n " $nb images "
-[[ $nb -eq 3 ]] || ! echo "fail" >&2 || $_exit 1
-echo " $nb flavors "
-[[ $nb -eq 3 ]] || ! echo "fail" >&2 || $_exit 1
-
-echo "Checking that openvim has 5 VM running"
-nb=`openvim vm-list | wc -l`
-[[ $nb -eq 5 ]] || ! echo "fail" >&2 || $_exit 1
-while openvim vm-list | grep -q CREATING ; do sleep 1; done
-openvim vm-list | grep -v -q ERROR || ! echo "fail: VM with error" >&2 || $_exit 1
-
-
-echo
-echo DONE
-#echo "Listing VNFs"
-#openmano vnf-list
-#echo "Listing scenarios"
-#openmano scenario-list
-#echo "Listing scenario instances"
-#openmano instance-scenario-list
-
-
diff --git a/test/test_RO.py b/test/test_RO.py
deleted file mode 100755 (executable)
index 932853c..0000000
+++ /dev/null
@@ -1,2549 +0,0 @@
-#!/usr/bin/env python2
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2017
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-##
-
-"""
-Module for testing openmano functionality. It uses openmanoclient.py for invoking openmano
-"""
-
-import logging
-import os
-import argcomplete
-import unittest
-import string
-import inspect
-import random
-# import traceback
-import glob
-import yaml
-import sys
-import time
-import uuid
-from argparse import ArgumentParser
-
-__author__ = "Pablo Montes, Alfonso Tierno"
-__date__ = "$16-Feb-2017 17:08:16$"
-__version__ = "0.1.0"
-version_date = "Oct 2017"
-
-test_config = {}    # used for global variables with the test configuration
-
-
-class test_base(unittest.TestCase):
-    test_index = 1
-    test_text = None
-
-    @classmethod
-    def setUpClass(cls):
-        logger.info("{}. {}".format(test_config["test_number"], cls.__name__))
-
-    @classmethod
-    def tearDownClass(cls):
-        test_config["test_number"] += 1
-
-    def tearDown(self):
-        exec_info = sys.exc_info()
-        if exec_info == (None, None, None):
-            logger.info(self.__class__.test_text+" -> TEST OK")
-        else:
-            logger.warning(self.__class__.test_text+" -> TEST NOK")
-            logger.critical("Traceback error",exc_info=True)
-
-
-def check_instance_scenario_active(uuid):
-    instance = test_config["client"].get_instance(uuid=uuid)
-
-    for net in instance['nets']:
-        status = net['status']
-        if status != 'ACTIVE':
-            return (False, status)
-
-    for vnf in instance['vnfs']:
-        for vm in vnf['vms']:
-            status = vm['status']
-            if status != 'ACTIVE':
-                return (False, status)
-
-    return (True, None)
-
-
-'''
-IMPORTANT NOTE
-All unittest classes for code based tests must have prefix 'test_' in order to be taken into account for tests
-'''
-class test_VIM_datacenter_tenant_operations(test_base):
-    tenant_name = None
-
-    def test_000_create_RO_tenant(self):
-        self.__class__.tenant_name = _get_random_string(20)
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
-                                                           inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-        tenant = test_config["client"].create_tenant(name=self.__class__.tenant_name,
-                                                     description=self.__class__.tenant_name)
-        logger.debug("{}".format(tenant))
-        self.assertEqual(tenant.get('tenant', {}).get('name', ''), self.__class__.tenant_name)
-
-    def test_010_list_RO_tenant(self):
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
-                                                           inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-        tenant = test_config["client"].get_tenant(name=self.__class__.tenant_name)
-        logger.debug("{}".format(tenant))
-        self.assertEqual(tenant.get('tenant', {}).get('name', ''), self.__class__.tenant_name)
-
-    def test_020_delete_RO_tenant(self):
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
-                                                           inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-        tenant = test_config["client"].delete_tenant(name=self.__class__.tenant_name)
-        logger.debug("{}".format(tenant))
-        assert('deleted' in tenant.get('result',""))
-
-
-class test_VIM_datacenter_operations(test_base):
-    datacenter_name = None
-
-    def test_000_create_datacenter(self):
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
-                                                           inspect.currentframe().f_code.co_name)
-        self.__class__.datacenter_name = _get_random_string(20)
-        self.__class__.test_index += 1
-        self.datacenter = test_config["client"].create_datacenter(name=self.__class__.datacenter_name,
-                                                                  vim_url="http://fakeurl/fake")
-        logger.debug("{}".format(self.datacenter))
-        self.assertEqual (self.datacenter.get('datacenter', {}).get('name',''), self.__class__.datacenter_name)
-
-    def test_010_list_datacenter(self):
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
-                                                           inspect.currentframe().f_code.co_name)
-
-        self.__class__.test_index += 1
-        self.datacenter = test_config["client"].get_datacenter(all_tenants=True, name=self.__class__.datacenter_name)
-        logger.debug("{}".format(self.datacenter))
-        self.assertEqual (self.datacenter.get('datacenter', {}).get('name', ''), self.__class__.datacenter_name)
-
-    def test_020_attach_datacenter(self):
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
-                                                           inspect.currentframe().f_code.co_name)
-
-        self.__class__.test_index += 1
-        self.datacenter = test_config["client"].attach_datacenter(name=self.__class__.datacenter_name,
-                                                                  vim_tenant_name='fake')
-        logger.debug("{}".format(self.datacenter))
-        assert ('uuid' in self.datacenter.get('datacenter', {}))
-
-    def test_030_list_attached_datacenter(self):
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
-                                                           inspect.currentframe().f_code.co_name)
-
-        self.__class__.test_index += 1
-        self.datacenter = test_config["client"].get_datacenter(all_tenants=False, name=self.__class__.datacenter_name)
-        logger.debug("{}".format(self.datacenter))
-        self.assertEqual (self.datacenter.get('datacenter', {}).get('name', ''), self.__class__.datacenter_name)
-
-    def test_040_detach_datacenter(self):
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
-                                                           inspect.currentframe().f_code.co_name)
-
-        self.__class__.test_index += 1
-        self.datacenter = test_config["client"].detach_datacenter(name=self.__class__.datacenter_name)
-        logger.debug("{}".format(self.datacenter))
-        assert ('detached' in self.datacenter.get('result', ""))
-
-    def test_050_delete_datacenter(self):
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
-                                                           inspect.currentframe().f_code.co_name)
-
-        self.__class__.test_index += 1
-        self.datacenter = test_config["client"].delete_datacenter(name=self.__class__.datacenter_name)
-        logger.debug("{}".format(self.datacenter))
-        assert('deleted' in self.datacenter.get('result',""))
-
-
-class test_VIM_network_operations(test_base):
-    vim_network_name = None
-    vim_network_uuid = None
-
-    def test_000_create_VIM_network(self):
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
-                                                           inspect.currentframe().f_code.co_name)
-        self.__class__.vim_network_name = _get_random_string(20)
-        self.__class__.test_index += 1
-        network = test_config["client"].vim_action("create", "networks", name=self.__class__.vim_network_name)
-        logger.debug("{}".format(network))
-        self.__class__.vim_network_uuid = network["network"]["id"]
-        self.assertEqual(network.get('network', {}).get('name', ''), self.__class__.vim_network_name)
-
-    def test_010_list_VIM_networks(self):
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
-                                                           inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-        networks = test_config["client"].vim_action("list", "networks")
-        logger.debug("{}".format(networks))
-
-    def test_020_get_VIM_network_by_uuid(self):
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
-                                                           inspect.currentframe().f_code.co_name)
-
-        self.__class__.test_index += 1
-        network = test_config["client"].vim_action("show", "networks", uuid=self.__class__.vim_network_uuid)
-        logger.debug("{}".format(network))
-        self.assertEqual(network.get('network', {}).get('name', ''), self.__class__.vim_network_name)
-
-    def test_030_delete_VIM_network_by_uuid(self):
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
-                                                           inspect.currentframe().f_code.co_name)
-
-        self.__class__.test_index += 1
-        network = test_config["client"].vim_action("delete", "networks", uuid=self.__class__.vim_network_uuid)
-        logger.debug("{}".format(network))
-        assert ('deleted' in network.get('result', ""))
-
-
-class test_VIM_image_operations(test_base):
-
-    def test_000_list_VIM_images(self):
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
-                                                           inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-        images = test_config["client"].vim_action("list", "images")
-        logger.debug("{}".format(images))
-
-'''
-The following is a non critical test that will fail most of the times.
-In case of OpenStack datacenter these tests will only success if RO has access to the admin endpoint
-This test will only be executed in case it is specifically requested by the user
-'''
-class test_VIM_tenant_operations(test_base):
-    vim_tenant_name = None
-    vim_tenant_uuid = None
-
-    @classmethod
-    def setUpClass(cls):
-        test_base.setUpClass(cls)
-        logger.warning("In case of OpenStack datacenter these tests will only success "
-                       "if RO has access to the admin endpoint")
-
-    def test_000_create_VIM_tenant(self):
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
-                                                           inspect.currentframe().f_code.co_name)
-        self.__class__.vim_tenant_name = _get_random_string(20)
-        self.__class__.test_index += 1
-        tenant = test_config["client"].vim_action("create", "tenants", name=self.__class__.vim_tenant_name)
-        logger.debug("{}".format(tenant))
-        self.__class__.vim_tenant_uuid = tenant["tenant"]["id"]
-        self.assertEqual(tenant.get('tenant', {}).get('name', ''), self.__class__.vim_tenant_name)
-
-    def test_010_list_VIM_tenants(self):
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
-                                                           inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-        tenants = test_config["client"].vim_action("list", "tenants")
-        logger.debug("{}".format(tenants))
-
-    def test_020_get_VIM_tenant_by_uuid(self):
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
-                                                           inspect.currentframe().f_code.co_name)
-
-        self.__class__.test_index += 1
-        tenant = test_config["client"].vim_action("show", "tenants", uuid=self.__class__.vim_tenant_uuid)
-        logger.debug("{}".format(tenant))
-        self.assertEqual(tenant.get('tenant', {}).get('name', ''), self.__class__.vim_tenant_name)
-
-    def test_030_delete_VIM_tenant_by_uuid(self):
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
-                                                           inspect.currentframe().f_code.co_name)
-
-        self.__class__.test_index += 1
-        tenant = test_config["client"].vim_action("delete", "tenants", uuid=self.__class__.vim_tenant_uuid)
-        logger.debug("{}".format(tenant))
-        assert ('deleted' in tenant.get('result', ""))
-
-
-class test_vimconn_connect(test_base):
-
-    def test_000_connect(self):
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-
-        self.__class__.test_index += 1
-        if test_config['vimtype'] == 'vmware':
-            vca_object = test_config["vim_conn"].connect()
-            logger.debug("{}".format(vca_object))
-            self.assertIsNotNone(vca_object)
-        elif test_config['vimtype'] == 'openstack':
-            test_config["vim_conn"]._reload_connection()
-            network_list = test_config["vim_conn"].get_network_list()
-            logger.debug("{}".format(network_list))
-            self.assertIsNotNone(network_list)
-
-class test_vimconn_new_network(test_base):
-    network_name = None
-
-    def test_000_new_network(self):
-        self.__class__.network_name = _get_random_string(20)
-        network_type = 'bridge'
-
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                     self.__class__.test_index, inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-
-        network, _ = test_config["vim_conn"].new_network(net_name=self.__class__.network_name,
-                                                          net_type=network_type)
-        self.__class__.network_id = network
-        logger.debug("{}".format(network))
-
-        network_list = test_config["vim_conn"].get_network_list()
-        for net in network_list:
-            if self.__class__.network_name in net.get('name'):
-                self.assertIn(self.__class__.network_name, net.get('name'))
-                self.assertEqual(net.get('type'), network_type)
-
-        # Deleting created network
-        result = test_config["vim_conn"].delete_network(self.__class__.network_id)
-        if result:
-            logger.info("Network id {} sucessfully deleted".format(self.__class__.network_id))
-        else:
-            logger.info("Failed to delete network id {}".format(self.__class__.network_id))
-
-    def test_010_new_network_by_types(self):
-        delete_net_ids = []
-        network_types = ['data','bridge','mgmt']
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-        for net_type in network_types:
-            self.__class__.network_name = _get_random_string(20)
-            network_id, _ = test_config["vim_conn"].new_network(net_name=self.__class__.network_name,
-                                                                                net_type=net_type)
-
-            delete_net_ids.append(network_id)
-            logger.debug("{}".format(network_id))
-
-            network_list = test_config["vim_conn"].get_network_list()
-            for net in network_list:
-                if self.__class__.network_name in net.get('name'):
-                    self.assertIn(self.__class__.network_name, net.get('name'))
-                if net_type in net.get('type'):
-                    self.assertEqual(net.get('type'), net_type)
-                else:
-                    self.assertNotEqual(net.get('type'), net_type)
-
-        # Deleting created network
-        for net_id in delete_net_ids:
-            result = test_config["vim_conn"].delete_network(net_id)
-            if result:
-                logger.info("Network id {} sucessfully deleted".format(net_id))
-            else:
-                logger.info("Failed to delete network id {}".format(net_id))
-
-    def test_020_new_network_by_ipprofile(self):
-        test_directory_content = os.listdir(test_config["test_directory"])
-
-        for dir_name in test_directory_content:
-            if dir_name == 'simple_multi_vnfc':
-                self.__class__.scenario_test_path = test_config["test_directory"] + '/'+ dir_name
-                vnfd_files = glob.glob(self.__class__.scenario_test_path+'/vnfd_*.yaml')
-                break
-
-        for vnfd in vnfd_files:
-            with open(vnfd, 'r') as stream:
-                vnf_descriptor = yaml.load(stream)
-
-            internal_connections_list = vnf_descriptor['vnf']['internal-connections']
-            for item in internal_connections_list:
-                if 'ip-profile' in item:
-                    version = item['ip-profile']['ip-version']
-                    dhcp_count = item['ip-profile']['dhcp']['count']
-                    dhcp_enabled = item['ip-profile']['dhcp']['enabled']
-                    dhcp_start_address = item['ip-profile']['dhcp']['start-address']
-                    subnet_address = item['ip-profile']['subnet-address']
-
-
-        self.__class__.network_name = _get_random_string(20)
-        ip_profile = {'dhcp_count': dhcp_count,
-                      'dhcp_enabled': dhcp_enabled,
-                      'dhcp_start_address': dhcp_start_address,
-                      'ip_version': version,
-                      'subnet_address': subnet_address
-                     }
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-        network, _ = test_config["vim_conn"].new_network(net_name=self.__class__.network_name,
-                                                                           net_type='mgmt',
-                                                                     ip_profile=ip_profile)
-        self.__class__.network_id = network
-        logger.debug("{}".format(network))
-
-        network_list = test_config["vim_conn"].get_network_list()
-        for net in network_list:
-            if self.__class__.network_name in net.get('name'):
-                self.assertIn(self.__class__.network_name, net.get('name'))
-
-        # Deleting created network
-        result = test_config["vim_conn"].delete_network(self.__class__.network_id)
-        if result:
-            logger.info("Network id {} sucessfully deleted".format(self.__class__.network_id))
-        else:
-            logger.info("Failed to delete network id {}".format(self.__class__.network_id))
-
-    def test_030_new_network_by_isshared(self):
-        self.__class__.network_name = _get_random_string(20)
-        shared = True
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-        network, _ = test_config["vim_conn"].new_network(net_name=self.__class__.network_name,
-                                                                         net_type='bridge',
-                                                                             shared=shared)
-        self.__class__.network_id = network
-        logger.debug("{}".format(network))
-
-        network_list = test_config["vim_conn"].get_network_list()
-        for net in network_list:
-            if self.__class__.network_name in net.get('name'):
-                self.assertIn(self.__class__.network_name, net.get('name'))
-                self.assertEqual(net.get('shared'), shared)
-
-        # Deleting created network
-        result = test_config["vim_conn"].delete_network(self.__class__.network_id)
-        if result:
-            logger.info("Network id {} sucessfully deleted".format(self.__class__.network_id))
-        else:
-            logger.info("Failed to delete network id {}".format(self.__class__.network_id))
-
-    def test_040_new_network_by_negative(self):
-        self.__class__.network_name = _get_random_string(20)
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-        network, _ = test_config["vim_conn"].new_network(net_name=self.__class__.network_name,
-                                                                    net_type='unknowntype')
-        self.__class__.network_id = network
-        logger.debug("{}".format(network))
-        network_list = test_config["vim_conn"].get_network_list()
-        for net in network_list:
-            if self.__class__.network_name in net.get('name'):
-                self.assertIn(self.__class__.network_name, net.get('name'))
-
-        # Deleting created network
-        result = test_config["vim_conn"].delete_network(self.__class__.network_id)
-        if result:
-            logger.info("Network id {} sucessfully deleted".format(self.__class__.network_id))
-        else:
-            logger.info("Failed to delete network id {}".format(self.__class__.network_id))
-
-    def test_050_refresh_nets_status(self):
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-        # creating new network
-        network_name = _get_random_string(20)
-        net_type = 'bridge'
-        network_id, _ = test_config["vim_conn"].new_network(net_name=network_name,
-                                                          net_type=net_type)
-        # refresh net status
-        net_dict = test_config["vim_conn"].refresh_nets_status([network_id])
-        for attr in net_dict[network_id]:
-            if attr == 'status':
-                self.assertEqual(net_dict[network_id][attr], 'ACTIVE')
-
-        # Deleting created network
-        result = test_config["vim_conn"].delete_network(network_id)
-        if result:
-            logger.info("Network id {} sucessfully deleted".format(network_id))
-        else:
-            logger.info("Failed to delete network id {}".format(network_id))
-
-    def test_060_refresh_nets_status_negative(self):
-        unknown_net_id = str(uuid.uuid4())
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-
-        # refresh net status
-        net_dict = test_config["vim_conn"].refresh_nets_status([unknown_net_id])
-        if test_config['vimtype'] == 'openstack':
-            self.assertEqual(net_dict[unknown_net_id]['status'], 'DELETED')
-        else:
-            # TODO : Fix vmware connector to return status DELETED as per vimconn.py
-            self.assertEqual(net_dict, {})
-
-class test_vimconn_get_network_list(test_base):
-    network_name = None
-
-    def setUp(self):
-        # creating new network
-        self.__class__.network_name = _get_random_string(20)
-        self.__class__.net_type = 'bridge'
-        network, _ = test_config["vim_conn"].new_network(net_name=self.__class__.network_name,
-                                                          net_type=self.__class__.net_type)
-        self.__class__.network_id = network
-        logger.debug("{}".format(network))
-
-    def tearDown(self):
-        test_base.tearDown(self)
-
-        # Deleting created network
-        result = test_config["vim_conn"].delete_network(self.__class__.network_id)
-        if result:
-            logger.info("Network id {} sucessfully deleted".format(self.__class__.network_id))
-        else:
-            logger.info("Failed to delete network id {}".format(self.__class__.network_id))
-
-    def test_000_get_network_list(self):
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-
-        network_list = test_config["vim_conn"].get_network_list()
-        for net in network_list:
-            if self.__class__.network_name in net.get('name'):
-                self.assertIn(self.__class__.network_name, net.get('name'))
-                self.assertEqual(net.get('type'), self.__class__.net_type)
-                self.assertEqual(net.get('status'), 'ACTIVE')
-                self.assertEqual(net.get('shared'), False)
-
-    def test_010_get_network_list_by_name(self):
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-
-        if test_config['vimtype'] == 'openstack':
-            network_name = test_config['vim_conn'].get_network(self.__class__.network_id)['name']
-        else:
-            network_name = test_config['vim_conn'].get_network_name_by_id(self.__class__.network_id)
-
-        # find network from list by it's name
-        new_network_list = test_config["vim_conn"].get_network_list({'name': network_name})
-        for list_item in new_network_list:
-            if self.__class__.network_name in list_item.get('name'):
-                self.assertEqual(network_name, list_item.get('name'))
-                self.assertEqual(list_item.get('type'), self.__class__.net_type)
-                self.assertEqual(list_item.get('status'), 'ACTIVE')
-
-    def test_020_get_network_list_by_id(self):
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-
-        # find network from list by it's id
-        new_network_list = test_config["vim_conn"].get_network_list({'id':self.__class__.network_id})
-        for list_item in new_network_list:
-            if self.__class__.network_id in list_item.get('id'):
-                self.assertEqual(self.__class__.network_id, list_item.get('id'))
-                self.assertEqual(list_item.get('type'), self.__class__.net_type)
-                self.assertEqual(list_item.get('status'), 'ACTIVE')
-
-    def test_030_get_network_list_by_shared(self):
-        Shared = False
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-
-        if test_config['vimtype'] == 'openstack':
-            network_name = test_config['vim_conn'].get_network(self.__class__.network_id)['name']
-        else:
-            network_name = test_config['vim_conn'].get_network_name_by_id(self.__class__.network_id)
-        # find network from list by it's shared value
-        new_network_list = test_config["vim_conn"].get_network_list({'shared':Shared,
-                                                                'name':network_name})
-        for list_item in new_network_list:
-            if list_item.get('shared') == Shared:
-                self.assertEqual(list_item.get('shared'), Shared)
-                self.assertEqual(list_item.get('type'), self.__class__.net_type)
-                self.assertEqual(network_name, list_item.get('name'))
-
-    def test_040_get_network_list_by_tenant_id(self):
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-
-        tenant_list = test_config["vim_conn"].get_tenant_list()
-        if test_config['vimtype'] == 'openstack':
-            network_name = test_config['vim_conn'].get_network(self.__class__.network_id)['name']
-        else:
-            network_name = test_config['vim_conn'].get_network_name_by_id(self.__class__.network_id)
-
-        for tenant_item in tenant_list:
-            if test_config['tenant'] == tenant_item.get('name'):
-                # find network from list by it's tenant id
-                tenant_id = tenant_item.get('id')
-                new_network_list = test_config["vim_conn"].get_network_list({'tenant_id':tenant_id,
-                                                                              'name':network_name})
-                for list_item in new_network_list:
-                    self.assertEqual(tenant_id, list_item.get('tenant_id'))
-                    self.assertEqual(network_name, list_item.get('name'))
-                    self.assertEqual(list_item.get('type'), self.__class__.net_type)
-                    self.assertEqual(list_item.get('status'), 'ACTIVE')
-
-    def test_050_get_network_list_by_status(self):
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-        status = 'ACTIVE'
-
-        if test_config['vimtype'] == 'openstack':
-            network_name = test_config['vim_conn'].get_network(self.__class__.network_id)['name']
-        else:
-            network_name = test_config['vim_conn'].get_network_name_by_id(self.__class__.network_id)
-
-        # find network from list by it's status
-        new_network_list = test_config["vim_conn"].get_network_list({'status':status,
-                                                               'name': network_name})
-        for list_item in new_network_list:
-            self.assertIn(self.__class__.network_name, list_item.get('name'))
-            self.assertEqual(list_item.get('type'), self.__class__.net_type)
-            self.assertEqual(list_item.get('status'), status)
-
-    def test_060_get_network_list_by_negative(self):
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-
-        network_list = test_config["vim_conn"].get_network_list({'name': 'unknown_name'})
-        self.assertEqual(network_list, [])
-
-class test_vimconn_get_network(test_base):
-    network_name = None
-
-    def setUp(self):
-        # creating new network
-        self.__class__.network_name = _get_random_string(20)
-        self.__class__.net_type = 'bridge'
-        network, _ = test_config["vim_conn"].new_network(net_name=self.__class__.network_name,
-                                                          net_type=self.__class__.net_type)
-        self.__class__.network_id = network
-        logger.debug("{}".format(network))
-
-    def tearDown(self):
-        test_base.tearDown(self)
-
-        # Deleting created network
-        result = test_config["vim_conn"].delete_network(self.__class__.network_id)
-        if result:
-            logger.info("Network id {} sucessfully deleted".format(self.__class__.network_id))
-        else:
-            logger.info("Failed to delete network id {}".format(self.__class__.network_id))
-
-    def test_000_get_network(self):
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-
-        network_info = test_config["vim_conn"].get_network(self.__class__.network_id)
-        self.assertEqual(network_info.get('status'), 'ACTIVE')
-        self.assertIn(self.__class__.network_name, network_info.get('name'))
-        self.assertEqual(network_info.get('type'), self.__class__.net_type)
-        self.assertEqual(network_info.get('id'), self.__class__.network_id)
-
-    def test_010_get_network_negative(self):
-        Non_exist_id = str(uuid.uuid4())
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-        with self.assertRaises(Exception) as context:
-            test_config["vim_conn"].get_network(Non_exist_id)
-
-        self.assertEqual((context.exception).http_code, 404)
-
-class test_vimconn_delete_network(test_base):
-    network_name = None
-
-    def test_000_delete_network(self):
-        # Creating network
-        self.__class__.network_name = _get_random_string(20)
-        self.__class__.net_type = 'bridge'
-        network, _ = test_config["vim_conn"].new_network(net_name=self.__class__.network_name,
-                                                          net_type=self.__class__.net_type)
-        self.__class__.network_id = network
-        logger.debug("{}".format(network))
-
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-
-        result = test_config["vim_conn"].delete_network(self.__class__.network_id)
-        if result:
-            logger.info("Network id {} sucessfully deleted".format(self.__class__.network_id))
-        else:
-            logger.info("Failed to delete network id {}".format(self.__class__.network_id))
-        time.sleep(5)
-        # after deleting network we check in network list
-        network_list = test_config["vim_conn"].get_network_list({ 'id':self.__class__.network_id })
-        self.assertEqual(network_list, [])
-
-    def test_010_delete_network_negative(self):
-        Non_exist_id = str(uuid.uuid4())
-
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-
-        with self.assertRaises(Exception) as context:
-            test_config["vim_conn"].delete_network(Non_exist_id)
-
-        self.assertEqual((context.exception).http_code, 404)
-
-class test_vimconn_get_flavor(test_base):
-
-    def test_000_get_flavor(self):
-        test_directory_content = os.listdir(test_config["test_directory"])
-
-        for dir_name in test_directory_content:
-            if dir_name == 'simple_linux':
-                self.__class__.scenario_test_path = test_config["test_directory"] + '/'+ dir_name
-                vnfd_files = glob.glob(self.__class__.scenario_test_path+'/vnfd_*.yaml')
-                break
-
-        for vnfd in vnfd_files:
-            with open(vnfd, 'r') as stream:
-                vnf_descriptor = yaml.load(stream)
-
-            vnfc_list = vnf_descriptor['vnf']['VNFC']
-            for item in vnfc_list:
-                if 'ram' in item and 'vcpus' in item and 'disk' in item:
-                    ram = item['ram']
-                    vcpus = item['vcpus']
-                    disk = item['disk']
-
-        flavor_data = {
-                      'name' : _get_random_string(20),
-                      'ram': ram,
-                      'vcpus': vcpus,
-                      'disk': disk
-                    }
-
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-        # create new flavor
-        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
-        # get flavor by id
-        result = test_config["vim_conn"].get_flavor(flavor_id)
-        self.assertEqual(ram, result['ram'])
-        self.assertEqual(vcpus, result['vcpus'])
-        self.assertEqual(disk, result['disk'])
-
-        # delete flavor
-        result = test_config["vim_conn"].delete_flavor(flavor_id)
-        if result:
-            logger.info("Flavor id {} sucessfully deleted".format(result))
-        else:
-            logger.info("Failed to delete flavor id {}".format(result))
-
-    def test_010_get_flavor_negative(self):
-        Non_exist_flavor_id = str(uuid.uuid4())
-
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-
-        with self.assertRaises(Exception) as context:
-            test_config["vim_conn"].get_flavor(Non_exist_flavor_id)
-
-        self.assertEqual((context.exception).http_code, 404)
-
-class test_vimconn_new_flavor(test_base):
-    flavor_id = None
-
-    def test_000_new_flavor(self):
-        flavor_data = {'name': _get_random_string(20), 'ram': 1024, 'vpcus': 1, 'disk': 10}
-
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-
-        # create new flavor
-        self.__class__.flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
-        self.assertIsInstance(self.__class__.flavor_id, (str, unicode))
-        self.assertIsInstance(uuid.UUID(self.__class__.flavor_id), uuid.UUID)
-
-    def test_010_delete_flavor(self):
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-
-        # delete flavor
-        result = test_config["vim_conn"].delete_flavor(self.__class__.flavor_id)
-        if result:
-            logger.info("Flavor id {} sucessfully deleted".format(result))
-        else:
-            logger.error("Failed to delete flavor id {}".format(result))
-            raise Exception ("Failed to delete created flavor")
-
-    def test_020_new_flavor_negative(self):
-        Invalid_flavor_data = {'ram': '1024', 'vcpus': 2.0, 'disk': 2.0}
-
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-
-        with self.assertRaises(Exception) as context:
-            test_config["vim_conn"].new_flavor(Invalid_flavor_data)
-
-        self.assertEqual((context.exception).http_code, 400)
-
-    def test_030_delete_flavor_negative(self):
-        Non_exist_flavor_id = str(uuid.uuid4())
-
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-
-        with self.assertRaises(Exception) as context:
-            test_config["vim_conn"].delete_flavor(Non_exist_flavor_id)
-
-        self.assertEqual((context.exception).http_code, 404)
-
-# class test_vimconn_new_image(test_base):
-#
-#     def test_000_new_image(self):
-#         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-#                                                             self.__class__.test_index,
-#                                                 inspect.currentframe().f_code.co_name)
-#         self.__class__.test_index += 1
-#
-#         image_path = test_config['image_path']
-#         if image_path:
-#             self.__class__.image_id = test_config["vim_conn"].new_image({ 'name': 'TestImage', 'location' : image_path, 'metadata': {'upload_location':None} })
-#             time.sleep(20)
-#
-#             self.assertIsInstance(self.__class__.image_id, (str, unicode))
-#             self.assertIsInstance(uuid.UUID(self.__class__.image_id), uuid.UUID)
-#         else:
-#             self.skipTest("Skipping test as image file not present at RO container")
-#
-#     def test_010_new_image_negative(self):
-#         Non_exist_image_path = '/temp1/cirros.ovf'
-#
-#         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-#                                                             self.__class__.test_index,
-#                                                 inspect.currentframe().f_code.co_name)
-#         self.__class__.test_index += 1
-#
-#         with self.assertRaises(Exception) as context:
-#             test_config["vim_conn"].new_image({ 'name': 'TestImage', 'location' : Non_exist_image_path})
-#
-#         self.assertEqual((context.exception).http_code, 400)
-#
-#     def test_020_delete_image(self):
-#         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-#                                                             self.__class__.test_index,
-#                                                 inspect.currentframe().f_code.co_name)
-#         self.__class__.test_index += 1
-#
-#         image_id = test_config["vim_conn"].delete_image(self.__class__.image_id)
-#
-#         self.assertIsInstance(image_id, (str, unicode))
-#
-#     def test_030_delete_image_negative(self):
-#         Non_exist_image_id = str(uuid.uuid4())
-#
-#         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-#                                                             self.__class__.test_index,
-#                                                 inspect.currentframe().f_code.co_name)
-#         self.__class__.test_index += 1
-#
-#         with self.assertRaises(Exception) as context:
-#             test_config["vim_conn"].delete_image(Non_exist_image_id)
-#
-#         self.assertEqual((context.exception).http_code, 404)
-
-# class test_vimconn_get_image_id_from_path(test_base):
-#
-#     def test_000_get_image_id_from_path(self):
-#         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-#                                                             self.__class__.test_index,
-#                                                 inspect.currentframe().f_code.co_name)
-#         self.__class__.test_index += 1
-#
-#         image_path = test_config['image_path']
-#         if image_path:
-#             image_id = test_config["vim_conn"].get_image_id_from_path( image_path )
-#             self.assertEqual(type(image_id),str)
-#         else:
-#             self.skipTest("Skipping test as image file not present at RO container")
-#
-#     def test_010_get_image_id_from_path_negative(self):
-#         Non_exist_image_path = '/temp1/cirros.ovf'
-#
-#         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-#                                                             self.__class__.test_index,
-#                                                 inspect.currentframe().f_code.co_name)
-#         self.__class__.test_index += 1
-#
-#         with self.assertRaises(Exception) as context:
-#             test_config["vim_conn"].new_image({ 'name': 'TestImage', 'location' : Non_exist_image_path })
-#
-#         self.assertEqual((context.exception).http_code, 400)
-
-class test_vimconn_get_image_list(test_base):
-    image_name = None
-    image_id = None
-
-    def test_000_get_image_list(self):
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-        image_list = test_config["vim_conn"].get_image_list()
-
-        for item in image_list:
-            if 'name' in item:
-                self.__class__.image_name = item['name']
-                self.__class__.image_id = item['id']
-                self.assertIsInstance(self.__class__.image_name, (str, unicode))
-                self.assertIsInstance(self.__class__.image_id, (str, unicode))
-
-    def test_010_get_image_list_by_name(self):
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-
-        image_list = test_config["vim_conn"].get_image_list({'name': self.__class__.image_name})
-
-        for item in image_list:
-            self.assertIsInstance(item['id'], (str, unicode))
-            self.assertIsInstance(item['name'], (str, unicode))
-            self.assertEqual(item['id'], self.__class__.image_id)
-            self.assertEqual(item['name'], self.__class__.image_name)
-
-    def test_020_get_image_list_by_id(self):
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-
-        filter_image_list = test_config["vim_conn"].get_image_list({'id': self.__class__.image_id})
-
-        for item1 in filter_image_list:
-            self.assertIsInstance(item1['id'], (str, unicode))
-            self.assertIsInstance(item1['name'], (str, unicode))
-            self.assertEqual(item1['id'], self.__class__.image_id)
-            self.assertEqual(item1['name'], self.__class__.image_name)
-
-    def test_030_get_image_list_negative(self):
-        Non_exist_image_id = uuid.uuid4()
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-        image_list = test_config["vim_conn"].get_image_list({'name': 'Unknown_name', 'id': Non_exist_image_id})
-
-        self.assertIsNotNone(image_list, None)
-        self.assertEqual(image_list, [])
-
-class test_vimconn_new_vminstance(test_base):
-    network_name = None
-    net_type = None
-    network_id = None
-    image_id = None
-    instance_id = None
-
-    def setUp(self):
-        # create network
-        self.__class__.network_name = _get_random_string(20)
-        self.__class__.net_type = 'bridge'
-
-        self.__class__.network_id, _ = test_config["vim_conn"].new_network(net_name=self.__class__.network_name,
-                                                                            net_type=self.__class__.net_type)
-        # find image name and image id
-        if test_config['image_name']:
-            image_list = test_config['vim_conn'].get_image_list({'name': test_config['image_name']})
-            if len(image_list) == 0:
-                raise Exception("Image {} is not found at VIM".format(test_config['image_name']))
-            else:
-                self.__class__.image_id = image_list[0]['id']
-        else:
-            image_list = test_config['vim_conn'].get_image_list()
-            if len(image_list) == 0:
-                raise Exception("Not found any image at VIM")
-            else:
-                self.__class__.image_id = image_list[0]['id']
-
-    def tearDown(self):
-        test_base.tearDown(self)
-        # Deleting created network
-        result = test_config["vim_conn"].delete_network(self.__class__.network_id)
-        if result:
-            logger.info("Network id {} sucessfully deleted".format(self.__class__.network_id))
-        else:
-            logger.info("Failed to delete network id {}".format(self.__class__.network_id))
-
-    def test_000_new_vminstance(self):
-        vpci = "0000:00:11.0"
-        name = "eth0"
-
-        flavor_data = {'name': _get_random_string(20), 'ram': 1024, 'vcpus': 1, 'disk': 10}
-
-        # create new flavor
-        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
-
-
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-
-        net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'vpci': vpci, 'port_security': True, 'type': 'virtual', 'net_id': self.__class__.network_id}]
-
-        self.__class__.instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False, image_id=self.__class__.image_id, flavor_id=flavor_id, net_list=net_list)
-
-        self.assertIsInstance(self.__class__.instance_id, (str, unicode))
-
-    def test_010_new_vminstance_by_model(self):
-        flavor_data = {'name': _get_random_string(20),'ram': 1024, 'vcpus': 2, 'disk': 10}
-        model_name = 'e1000'
-        name = 'eth0'
-
-        # create new flavor
-        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
-
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-
-        net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'port_security': True, 'model': model_name, 'type': 'virtual', 'net_id': self.__class__.network_id}]
-
-        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False, image_id=self.__class__.image_id,flavor_id=flavor_id,net_list=net_list)
-
-        self.assertIsInstance(instance_id, (str, unicode))
-
-        # Deleting created vm instance
-        logger.info("Deleting created vm intance")
-        test_config["vim_conn"].delete_vminstance(instance_id)
-        time.sleep(10)
-
-    def test_020_new_vminstance_by_net_use(self):
-        flavor_data = {'name': _get_random_string(20),'ram': 1024, 'vcpus': 2, 'disk': 10}
-        net_use = 'data'
-        name = 'eth0'
-
-        # create new flavor
-        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
-
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-
-        net_list = [{'use': net_use, 'name': name, 'floating_ip': False, 'port_security': True, 'type': 'virtual', 'net_id': self.__class__.network_id}]
-
-        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False, image_id=self.__class__.image_id,disk_list=None,
-                                                                                           flavor_id=flavor_id,
-                                                                                             net_list=net_list)
-        self.assertIsInstance(instance_id, (str, unicode))
-
-        # Deleting created vm instance
-        logger.info("Deleting created vm intance")
-        test_config["vim_conn"].delete_vminstance(instance_id)
-        time.sleep(10)
-
-    def test_030_new_vminstance_by_net_type(self):
-        flavor_data = {'name':_get_random_string(20),'ram': 1024, 'vcpus': 2, 'disk': 10}
-        _type = 'VF'
-        name = 'eth0'
-
-        # create new flavor
-        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
-
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-
-        if test_config['vimtype'] == 'vmware':
-            net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'port_security': True,
-                         'type': _type, 'net_id': self.__class__.network_id}]
-
-            instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=self.__class__.image_id,
-                                                                    flavor_id=flavor_id,
-                                                                    net_list=net_list)
-            self.assertEqual(type(instance_id),str)
-
-        if test_config['vimtype'] == 'openstack':
-            # create network of type data
-            network_name = _get_random_string(20)
-            net_type = 'data'
-
-            network_id, _ = test_config["vim_conn"].new_network(net_name=network_name,
-                                                                            net_type=net_type)
-            net_list = [{'use': net_type, 'name': name, 'floating_ip': False, 'port_security': True,
-                         'type': _type, 'net_id': network_id}]
-
-            instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False,
-                                                                    image_id=self.__class__.image_id, disk_list=None,
-                                                                    flavor_id=flavor_id,
-                                                                    net_list=net_list)
-
-            self.assertEqual(type(instance_id), unicode)
-
-            # delete created network
-            result = test_config["vim_conn"].delete_network(network_id)
-            if result:
-                logger.info("Network id {} sucessfully deleted".format(network_id))
-            else:
-                logger.info("Failed to delete network id {}".format(network_id))
-
-        # Deleting created vm instance
-        logger.info("Deleting created vm intance")
-        test_config["vim_conn"].delete_vminstance(instance_id)
-        time.sleep(10)
-
-    def test_040_new_vminstance_by_cloud_config(self):
-        flavor_data = {'name': _get_random_string(20),'ram': 1024, 'vcpus': 2, 'disk': 10}
-        name = 'eth0'
-        user_name = 'test_user'
-
-        key_pairs = ['ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCy2w9GHMKKNkpCmrDK2ovc3XBYDETuLWwaW24S+feHhLBQiZlzh3gSQoINlA+2ycM9zYbxl4BGzEzpTVyCQFZv5PidG4m6ox7LR+KYkDcITMyjsVuQJKDvt6oZvRt6KbChcCi0n2JJD/oUiJbBFagDBlRslbaFI2mmqmhLlJ5TLDtmYxzBLpjuX4m4tv+pdmQVfg7DYHsoy0hllhjtcDlt1nn05WgWYRTu7mfQTWfVTavu+OjIX3e0WN6NW7yIBWZcE/Q9lC0II3W7PZDE3QaT55se4SPIO2JTdqsx6XGbekdG1n6adlduOI27sOU5m4doiyJ8554yVbuDB/z5lRBD alfonso.tiernosepulveda@telefonica.com']
-
-        users_data = [{'key-pairs': ['ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCy2w9GHMKKNkpCmrDK2ovc3XBYDETuLWwaW24S+feHhLBQiZlzh3gSQoINlA+2ycM9zYbxl4BGzEzpTVyCQFZv5PidG4m6ox7LR+KYkDcITMyjsVuQJKDvt6oZvRt6KbChcCi0n2JJD/oUiJbBFagDBlRslbaFI2mmqmhLlJ5TLDtmYxzBLpjuX4m4tv+pdmQVfg7DYHsoy0hllhjtcDlt1nn05WgWYRTu7mfQTWfVTavu+OjIX3e0WN6NW7yIBWZcE/Q9lC0II3W7PZDE3QaT55se4SPIO2JTdqsx6XGbekdG1n6adlduOI27sOU5m4doiyJ8554yVbuDB/z5lRBD alfonso.tiernosepulveda@telefonica.com'], 'name': user_name}]
-
-        cloud_data = {'config-files': [{'content': 'auto enp0s3\niface enp0s3 inet dhcp\n', 'dest': '/etc/network/interfaces.d/enp0s3.cfg', 'owner': 'root:root', 'permissions': '0644'}, {'content': '#! /bin/bash\nls -al >> /var/log/osm.log\n', 'dest': '/etc/rc.local', 'permissions': '0755'}, {'content': 'file content', 'dest': '/etc/test_delete'}], 'boot-data-drive': True, 'key-pairs': key_pairs, 'users': users_data }
-
-        # create new flavor
-        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
-
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-
-        net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'port_security': True, 'type': 'virtual', 'net_id': self.__class__.network_id}]
-
-        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Cloud_vm', description='', start=False,
-                                                                image_id=self.__class__.image_id, flavor_id=flavor_id,net_list=net_list,cloud_config=cloud_data)
-
-        self.assertIsInstance(instance_id, (str, unicode))
-
-        # Deleting created vm instance
-        logger.info("Deleting created vm intance")
-        test_config["vim_conn"].delete_vminstance(instance_id)
-        time.sleep(10)
-
-    def test_050_new_vminstance_by_disk_list(self):
-        flavor_data = {'name':_get_random_string(20),'ram': 1024, 'vcpus': 2, 'disk': 10}
-        name = 'eth0'
-
-        device_data = [{'image_id': self.__class__.image_id, 'size': '10'}]
-
-        # create new flavor
-        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
-
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-
-        net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'port_security': True, 'type': 'virtual', 'net_id': self.__class__.network_id}]
-
-        instance_id, _ = test_config["vim_conn"].new_vminstance(name='VM_test1', description='', start=False, image_id=self.__class__.image_id,
-                                                                                           flavor_id=flavor_id,
-                                                                                             net_list=net_list,
-                                                                                         disk_list=device_data)
-
-        self.assertIsInstance(instance_id, (str, unicode))
-        # Deleting created vm instance
-        logger.info("Deleting created vm intance")
-        test_config["vim_conn"].delete_vminstance(instance_id)
-        time.sleep(10)
-
-    def test_060_new_vminstance_negative(self):
-        unknown_flavor_id = str(uuid.uuid4())
-        unknown_image_id = str(uuid.uuid4())
-        name = 'eth2'
-
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-
-        net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'port_security': True, 'type': 'virtual', 'net_id': self.__class__.network_id}]
-
-        with self.assertRaises(Exception) as context:
-            test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False, image_id=unknown_image_id,
-                                                                  flavor_id=unknown_flavor_id,
-                                                                            net_list=net_list)
-
-        self.assertIn((context.exception).http_code, (400, 404))
-
-
-    def test_070_get_vminstance(self):
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-
-        # Get instance by its id
-        vm_info = test_config["vim_conn"].get_vminstance(self.__class__.instance_id)
-
-        if test_config['vimtype'] == 'vmware':
-            for attr in vm_info:
-                if attr == 'status':
-                    self.assertEqual(vm_info[attr], 'ACTIVE')
-                if attr == 'hostId':
-                    self.assertEqual(type(vm_info[attr]), str)
-                if attr == 'interfaces':
-                    self.assertEqual(type(vm_info[attr]), list)
-                    self.assertEqual(vm_info[attr][0]['IsConnected'], 'true')
-                if attr == 'IsEnabled':
-                    self.assertEqual(vm_info[attr], 'true')
-
-    def test_080_get_vminstance_negative(self):
-        unknown_instance_id = str(uuid.uuid4())
-
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-
-        with self.assertRaises(Exception) as context:
-            test_config["vim_conn"].get_vminstance(unknown_instance_id)
-
-        self.assertEqual((context.exception).http_code, 404)
-
-    def test_090_refresh_vms_status(self):
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-
-        if test_config['vimtype'] == 'vmware':
-            vm_list = []
-            vm_list.append(self.__class__.instance_id)
-
-            # refresh vm status
-            vm_info = test_config["vim_conn"].refresh_vms_status(vm_list)
-            for attr in vm_info[self.__class__.instance_id]:
-                if attr == 'status':
-                    self.assertEqual(vm_info[self.__class__.instance_id][attr], 'ACTIVE')
-                if attr == 'interfaces':
-                    self.assertEqual(type(vm_info[self.__class__.instance_id][attr]), list)
-
-        if test_config['vimtype'] == 'openstack':
-            vpci = "0000:00:11.0"
-            name = "eth0"
-
-            flavor_data = {'name': _get_random_string(20), 'ram': 1024, 'vcpus': 1, 'disk': 10}
-
-            # create new flavor
-            flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
-             # create new vm instance
-            net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'vpci': vpci, 'port_security': True, 'type': 'virtual', 'net_id': self.__class__.network_id}]
-
-            instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False, image_id=self.__class__.image_id, flavor_id=flavor_id, net_list=net_list)
-
-            time.sleep(30)
-            vm_list = []
-            vm_list.append(instance_id)
-
-            # refresh vm status
-            vm_info = test_config["vim_conn"].refresh_vms_status(vm_list)
-            for attr in vm_info[instance_id]:
-                if attr == 'status':
-                    self.assertEqual(vm_info[instance_id][attr], 'ACTIVE')
-                if attr == 'interfaces':
-                    self.assertEqual(type(vm_info[instance_id][attr]), list)
-
-            #Deleting created vm instance
-            logger.info("Deleting created vm intance")
-            test_config["vim_conn"].delete_vminstance(instance_id)
-            time.sleep(10)
-
-
-    def test_100_refresh_vms_status_negative(self):
-        unknown_id = str(uuid.uuid4())
-
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-
-        vm_dict = test_config["vim_conn"].refresh_vms_status([unknown_id])
-
-        if test_config['vimtype'] == 'vmware':
-            self.assertEqual(vm_dict,{})
-
-        if test_config['vimtype'] == 'openstack':
-            self.assertEqual(vm_dict[unknown_id]['status'], 'DELETED')
-
-    def test_110_action_vminstance(self):
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-
-        if test_config['vimtype'] == 'vmware':
-            action_list = ['shutdown', 'start', 'shutoff', 'rebuild', 'pause', 'resume']
-            # various action on vminstace
-            for action in action_list:
-                instance_id = test_config["vim_conn"].action_vminstance(self.__class__.instance_id,
-                                                                        {action: None})
-                self.assertEqual(instance_id, self.__class__.instance_id)
-
-        if test_config['vimtype'] == 'openstack':
-            # create new vm instance
-            vpci = "0000:00:11.0"
-            name = "eth0"
-
-            flavor_data = {'name': _get_random_string(20), 'ram': 1024, 'vcpus': 1, 'disk': 10}
-
-            # create new flavor
-            flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
-
-            net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'vpci': vpci, 'port_security': True, 'type': 'virtual', 'net_id': self.__class__.network_id}]
-
-            new_instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False, image_id=self.__class__.image_id, flavor_id=flavor_id, net_list=net_list)
-
-            action_list =  ['shutdown','start','shutoff','rebuild','start','pause','start']
-
-            # various action on vminstace
-            for action in action_list:
-                # sleep for sometime till status is changed
-                time.sleep(25)
-                instance_id = test_config["vim_conn"].action_vminstance(new_instance_id,
-                                                                                   { action: None})
-
-            self.assertTrue(instance_id is None)
-
-            # Deleting created vm instance
-            logger.info("Deleting created vm intance")
-            test_config["vim_conn"].delete_vminstance(new_instance_id)
-            time.sleep(10)
-
-    def test_120_action_vminstance_negative(self):
-        non_exist_id = str(uuid.uuid4())
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-
-        action = 'start'
-        with self.assertRaises(Exception) as context:
-            test_config["vim_conn"].action_vminstance(non_exist_id, { action: None})
-
-        self.assertEqual((context.exception).http_code, 404)
-
-
-    def test_130_delete_vminstance(self):
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-
-        # Deleting created vm instance
-        logger.info("Deleting created vm instance")
-        test_config["vim_conn"].delete_vminstance(self.__class__.instance_id)
-        time.sleep(10)
-
-    def test_140_new_vminstance_sriov(self):
-        logger.info("Testing creation of sriov vm instance using {}".format(test_config['sriov_net_name']))
-        flavor_data = {'name': _get_random_string(20),'ram': 1024, 'vcpus': 2, 'disk': 10}
-        name = 'eth0'
-
-        # create new flavor
-        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
-
-        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
-                                                            self.__class__.test_index,
-                                                inspect.currentframe().f_code.co_name)
-        self.__class__.test_index += 1
-
-        sriov_net_name = test_config['sriov_net_name']
-        new_network_list = test_config["vim_conn"].get_network_list({'name': sriov_net_name})
-        for list_item in new_network_list:
-            self.assertEqual(sriov_net_name, list_item.get('name'))
-            self.__class__.sriov_network_id = list_item.get('id')
-
-        net_list = [{'use': 'data', 'name': name, 'floating_ip': False, 'port_security': True, 'type': 'VF', 'net_id': self.__class__.sriov_network_id}]
-
-        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_sriov_vm', description='', start=False, image_id=self.__class__.image_id, flavor_id=flavor_id, net_list=net_list)
-
-        self.assertIsInstance(instance_id, (str, unicode))
-
-        logger.info("Waiting for created sriov-vm intance")
-        time.sleep(10)
-        # Deleting created vm instance
-        logger.info("Deleting created sriov-vm intance")
-        test_config["vim_conn"].delete_vminstance(instance_id)
-        time.sleep(10)
-
class test_vimconn_get_tenant_list(test_base):
    """Tests for vimconnector.get_tenant_list(), unfiltered and filtered.

    test_000 caches the id of the configured tenant in a class attribute so
    that the later filter tests can reuse it (methods run in name order).
    """
    tenant_id = None  # id of test_config['tenant'], filled in by test_000

    def test_000_get_tenant_list(self):
        """The unfiltered tenant list must contain the configured tenant."""
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        # Getting tenant list
        tenant_list = test_config["vim_conn"].get_tenant_list()

        for item in tenant_list:
            if test_config['tenant'] == item['name']:
                self.__class__.tenant_id = item['id']
                # python3: VIM names/ids are plain str (py2 'unicode' no longer exists)
                self.assertIsInstance(item['name'], str)
                self.assertIsInstance(item['id'], str)

    def test_010_get_tenant_list_by_id(self):
        """Filtering by id must only return entries with that id."""
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        # Getting filter tenant list by its id
        filter_tenant_list = test_config["vim_conn"].get_tenant_list({'id': self.__class__.tenant_id})

        for item in filter_tenant_list:
            self.assertIsInstance(item['id'], str)
            self.assertEqual(item['id'], self.__class__.tenant_id)

    def test_020_get_tenant_list_by_name(self):
        """Filtering by name must only return entries with that name."""
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        # Getting filter tenant list by its name
        filter_tenant_list = test_config["vim_conn"].get_tenant_list({'name': test_config['tenant']})

        for item in filter_tenant_list:
            self.assertIsInstance(item['name'], str)
            self.assertEqual(item['name'], test_config['tenant'])

    def test_030_get_tenant_list_by_name_and_id(self):
        """Filtering by both name and id must match both fields."""
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        # Getting filter tenant list by its name and id
        filter_tenant_list = test_config["vim_conn"].get_tenant_list({'name': test_config['tenant'],
                                                                      'id': self.__class__.tenant_id})

        for item in filter_tenant_list:
            self.assertIsInstance(item['name'], str)
            self.assertIsInstance(item['id'], str)
            self.assertEqual(item['name'], test_config['tenant'])
            self.assertEqual(item['id'], self.__class__.tenant_id)

    def test_040_get_tenant_list_negative(self):
        """A filter that matches no tenant must return an empty list."""
        non_exist_tenant_name = "Tenant_123"
        non_exist_tenant_id = "kjhgrt456-45345kjhdfgnbdk-34dsfjdfg"
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        filter_tenant_list = test_config["vim_conn"].get_tenant_list({'name': non_exist_tenant_name,
                                                                      'id': non_exist_tenant_id})

        self.assertEqual(filter_tenant_list, [])
-
-
class test_vimconn_new_tenant(test_base):
    """Tests for tenant creation and deletion (new_tenant / delete_tenant)."""
    tenant_id = None  # id of the tenant created by test_000, removed by test_020

    def test_000_new_tenant(self):
        """Create a tenant with a random name; the returned id must be a str."""
        tenant_name = _get_random_string(20)
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        self.__class__.tenant_id = test_config["vim_conn"].new_tenant(tenant_name, "")
        # give the VIM some time to complete the creation
        time.sleep(15)

        # python3: ids are plain str (py2 'unicode' no longer exists)
        self.assertIsInstance(self.__class__.tenant_id, str)

    def test_010_new_tenant_negative(self):
        """A non-string tenant name must be rejected with HTTP 400."""
        Invalid_tenant_name = 10121  # deliberately not a string
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        with self.assertRaises(Exception) as context:
            test_config["vim_conn"].new_tenant(Invalid_tenant_name, "")

        self.assertEqual((context.exception).http_code, 400)

    def test_020_delete_tenant(self):
        """Deleting the tenant created in test_000 must return its id."""
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        tenant_id = test_config["vim_conn"].delete_tenant(self.__class__.tenant_id)

        self.assertIsInstance(tenant_id, str)

    def test_030_delete_tenant_negative(self):
        """Deleting a non-existent tenant must fail with HTTP 404."""
        Non_exist_tenant_name = 'Test_30_tenant'
        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        with self.assertRaises(Exception) as context:
            test_config["vim_conn"].delete_tenant(Non_exist_tenant_name)

        self.assertEqual((context.exception).http_code, 404)
-
-
def get_image_id():
    """Return the id of the image the tests should use.

    When test_config['image_name'] is set, look that image up at the VIM and
    fail if it is missing; otherwise fall back to the first image the VIM
    reports. Raises Exception when no suitable image exists.
    """
    image_name = test_config['image_name']
    if image_name:
        images = test_config['vim_conn'].get_image_list({'name': image_name})
        if not images:
            raise Exception("Image {} is not found at VIM".format(image_name))
    else:
        images = test_config['vim_conn'].get_image_list()
        if not images:
            raise Exception("Not found any image at VIM")
    return images[0]['id']
-
-
class test_vimconn_vminstance_by_ip_address(test_base):
    """VM instantiation tests exercising explicit IP, floating-ip and MAC
    addressing; each test runs on a dedicated bridge network created in setUp.
    """
    network_name = None
    network_id = None

    def setUp(self):
        # create a fresh bridge network for every test
        self.network_name = _get_random_string(20)

        self.network_id, _ = test_config["vim_conn"].new_network(net_name=self.network_name,
                                                                 net_type='bridge')

    def tearDown(self):
        test_base.tearDown(self)
        # Deleting created network
        result = test_config["vim_conn"].delete_network(self.network_id)
        if result:
            logger.info("Network id {} sucessfully deleted".format(self.network_id))
        else:
            logger.info("Failed to delete network id {}".format(self.network_id))

    def test_000_vminstance_by_ip_address(self):
        """
           This test case will deploy VM with provided IP address
           Pre-requesite: provided IP address should be from IP pool range which has used for network creation
        """
        name = "eth0"
        # provide ip address
        # NOTE(review): left empty in the original; must be filled with an
        # address from the network's subnet before this test is meaningful
        ip_address = ''

        flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10}

        # create new flavor
        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)

        # find image id
        image_id = get_image_id()

        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        net_list = [{'use': 'bridge', 'name': name, 'floating_ip': False, 'port_security': True, 'type': 'virtual',
                     'net_id': self.network_id, 'ip_address': ip_address}]

        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
                                                                flavor_id=flavor_id, net_list=net_list)

        # idiomatic type check (was: assertEqual(type(instance_id), str))
        self.assertIsInstance(instance_id, str)
        logger.info("Deleting created vm instance")
        test_config["vim_conn"].delete_vminstance(instance_id)
        time.sleep(10)

    def test_010_vminstance_by_ip_address_negative(self):
        """An IP outside the subnet range must be rejected with HTTP 400."""
        name = "eth1"
        # IP address not from subnet range
        invalid_ip_address = '10.10.12.1'

        flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10}

        # create new flavor
        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)

        # find image name and image id
        image_id = get_image_id()

        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        net_list = [{'use': 'bridge', 'name': name, 'floating_ip': False, 'port_security': True, 'type': 'virtual',
                     'net_id': self.network_id, 'ip_address': invalid_ip_address}]

        with self.assertRaises(Exception) as context:
            test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
                                                   flavor_id=flavor_id,
                                                   net_list=net_list)
        self.assertEqual((context.exception).http_code, 400)

    def test_020_vminstance_by_floating_ip(self):
        """Deploy a VM requesting a floating IP on the test network."""
        name = "eth1"
        flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10}

        # create new flavor
        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)

        # find image name and image id
        image_id = get_image_id()

        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        net_list = [{'use': 'bridge', 'name': name, 'floating_ip': True, 'port_security': True, 'type': 'virtual',
                     'net_id': self.network_id}]

        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
                                                                flavor_id=flavor_id, net_list=net_list)

        self.assertIsInstance(instance_id, str)
        logger.info("Deleting created vm instance")
        test_config["vim_conn"].delete_vminstance(instance_id)
        time.sleep(10)

    def test_030_vminstance_by_mac_address(self):
        """Deploy a VM with an explicitly requested MAC address."""
        name = "eth1"
        mac_address = "74:54:2f:21:da:8c"
        flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10}

        # create new flavor
        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)

        # find image name and image id
        image_id = get_image_id()

        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        net_list = [{'use': 'bridge', 'name': name, 'floating_ip': False, 'port_security': True, 'type': 'virtual',
                     'net_id': self.network_id, 'mac_address': mac_address}]

        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
                                                                flavor_id=flavor_id, net_list=net_list)

        self.assertIsInstance(instance_id, str)
        logger.info("Deleting created vm instance")
        test_config["vim_conn"].delete_vminstance(instance_id)
        time.sleep(10)
-
class test_vimconn_vminstance_by_adding_10_nics(test_base):
    """Deploy a VM attached to 10 freshly created bridge networks."""
    network_name = None
    net_ids = []  # class-level default; setUp rebinds a per-instance list

    def setUp(self):
        # Create 10 networks. Use an instance-level list so network ids do not
        # accumulate across test runs (the original appended to the shared
        # class attribute, so stale ids from previous runs were kept).
        self.net_ids = []
        for _ in range(10):
            self.network_name = _get_random_string(20)
            network_id, _ = test_config["vim_conn"].new_network(net_name=self.network_name,
                                                                net_type='bridge')
            self.net_ids.append(network_id)

    def tearDown(self):
        test_base.tearDown(self)
        # Deleting created networks
        for net_id in self.net_ids:
            result = test_config["vim_conn"].delete_network(net_id)
            if result:
                logger.info("Network id {} sucessfully deleted".format(net_id))
            else:
                logger.info("Failed to delete network id {}".format(net_id))

    def test_000_vminstance_by_adding_10_nics(self):
        """A VM with one NIC (eth1..eth10) per created network must deploy."""
        flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10}

        # create new flavor
        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)

        # find image name and image id
        image_id = get_image_id()

        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        # one NIC per created network, named eth1..eth10
        net_list = []
        for index, net_id in enumerate(self.net_ids, start=1):
            net_list.append({'use': 'bridge', 'name': "eth{}".format(index), 'floating_ip': False,
                             'port_security': True, 'type': 'virtual', 'net_id': net_id})

        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
                                                                flavor_id=flavor_id, net_list=net_list)

        # idiomatic type check (was: assertEqual(type(instance_id), str))
        self.assertIsInstance(instance_id, str)
        logger.info("Deleting created vm instance")
        test_config["vim_conn"].delete_vminstance(instance_id)
        time.sleep(10)
-
-
class test_vimconn_vminstance_by_existing_disk(test_base):
    """VM instantiation tests attaching disks: an existing image-backed disk,
    a newly created volume, and a CDROM media file. Each test runs on a
    dedicated bridge network created in setUp."""
    network_name = None
    network_id = None

    def setUp(self):
        # create a fresh bridge network for every test
        self.network_name = _get_random_string(20)
        self.network_id, _ = test_config["vim_conn"].new_network(net_name=self.network_name,
                                                                 net_type='bridge')

    def tearDown(self):
        test_base.tearDown(self)
        # Deleting created network
        result = test_config["vim_conn"].delete_network(self.network_id)
        if result:
            logger.info("Network id {} sucessfully deleted".format(self.network_id))
        else:
            logger.info("Failed to delete network id {}".format(self.network_id))

    def test_000_vminstance_by_existing_disk(self):
        """ This testcase will add existing disk only if given catalog/image is free
            means not used by any other VM
        """

        flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10}
        name = "eth10"

        # create new flavor
        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)

        # find image name and image id
        image_id = get_image_id()
        # assumes a 'cirros' image exists at the VIM (IndexError otherwise) -- TODO confirm
        cirros_image = test_config["vim_conn"].get_image_list({'name': 'cirros'})
        disk_list = [{'image_id': cirros_image[0]['id'], 'size': 5}]

        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        net_list = [{'use': 'bridge', 'name': name, 'floating_ip': False, 'port_security': True,
                     'type': 'virtual', 'net_id': self.network_id}]

        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
                                                                flavor_id=flavor_id, net_list=net_list,
                                                                disk_list=disk_list)

        # idiomatic type check (was: assertEqual(type(instance_id), str))
        self.assertIsInstance(instance_id, str)
        logger.info("Deleting created vm instance")
        test_config["vim_conn"].delete_vminstance(instance_id)
        time.sleep(10)

    def test_010_vminstance_by_new_disk(self):
        """Deploy a VM with a newly created 5 GB volume attached."""
        flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10}
        name = "eth10"

        # create new flavor
        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)

        # find image name and image id
        image_id = get_image_id()
        disk_list = [{'size': '5'}]

        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        net_list = [{'use': 'bridge', 'name': name, 'floating_ip': False, 'port_security': True,
                     'type': 'virtual', 'net_id': self.network_id}]

        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
                                                                flavor_id=flavor_id, net_list=net_list,
                                                                disk_list=disk_list)

        self.assertIsInstance(instance_id, str)
        logger.info("Deleting created vm instance")
        test_config["vim_conn"].delete_vminstance(instance_id)
        time.sleep(10)

    def test_020_vminstance_by_CDROM(self):
        """ This testcase will insert media file only if provided catalog
            has pre-created ISO media file into vCD
        """
        flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10}
        name = "eth10"
        # assumes an 'Ubuntu' catalog entry with an ISO exists -- TODO confirm
        image_list = test_config["vim_conn"].get_image_list({'name': 'Ubuntu'})
        disk_list = [{'image_id': image_list[0]['id'], 'device_type': 'cdrom'}]

        # create new flavor
        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)

        # find image name and image id
        image_id = get_image_id()

        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        net_list = [{'use': 'bridge', 'name': name, 'floating_ip': False, 'port_security': True,
                     'type': 'virtual', 'net_id': self.network_id}]

        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
                                                                flavor_id=flavor_id, net_list=net_list,
                                                                disk_list=disk_list)

        self.assertIsInstance(instance_id, str)
        logger.info("Deleting created vm instance")
        test_config["vim_conn"].delete_vminstance(instance_id)
        time.sleep(10)
-
-
class test_vimconn_vminstance_by_affinity_anti_affinity(test_base):
    """Deploy a VM into a configured availability zone (vCenter Host Group)."""
    network_name = None
    network_id = None

    def setUp(self):
        # create a fresh bridge network for the test
        self.network_name = _get_random_string(20)
        self.network_id, _ = test_config["vim_conn"].new_network(net_name=self.network_name,
                                                                 net_type='bridge')

    def tearDown(self):
        test_base.tearDown(self)
        # Deleting created network
        result = test_config["vim_conn"].delete_network(self.network_id)
        if result:
            logger.info("Network id {} sucessfully deleted".format(self.network_id))
        else:
            logger.info("Failed to delete network id {}".format(self.network_id))

    def test_000_vminstance_by_affinity_anti_affinity(self):
        """ This testcase will deploy VM into provided HOSTGROUP in VIM config
            Pre-requisites: User has created Host Groups in vCenter with respective Hosts to be used
            While creating VIM account user has to pass the Host Group names in availability_zone list
        """
        flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10}
        name = "eth10"

        # create new flavor
        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)

        # find image name and image id
        image_id = get_image_id()

        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        net_list = [{'use': 'bridge', 'name': name, 'floating_ip': False, 'port_security': True,
                     'type': 'virtual', 'net_id': self.network_id}]

        # index 1 selects 'HG_175'; both names must exist as vCenter Host Groups
        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
                                                                flavor_id=flavor_id, net_list=net_list,
                                                                availability_zone_index=1,
                                                                availability_zone_list=['HG_174', 'HG_175'])

        # idiomatic type check (was: assertEqual(type(instance_id), str))
        self.assertIsInstance(instance_id, str)
        time.sleep(10)
        logger.info("Deleting created vm instance")
        test_config["vim_conn"].delete_vminstance(instance_id)
-
class test_vimconn_vminstance_by_numa_affinity(test_base):
    """Deploy a VM from a flavor carrying extended NUMA placement data."""
    network_name = None
    network_id = None

    def setUp(self):
        # create a fresh bridge network for the test
        self.network_name = _get_random_string(20)
        self.network_id, _ = test_config["vim_conn"].new_network(net_name=self.network_name,
                                                                 net_type='bridge')

    def tearDown(self):
        test_base.tearDown(self)
        # Deleting created network
        result = test_config["vim_conn"].delete_network(self.network_id)
        if result:
            logger.info("Network id {} sucessfully deleted".format(self.network_id))
        else:
            logger.info("Failed to delete network id {}".format(self.network_id))

    def test_000_vminstance_by_numa_affinity(self):
        """A flavor with NUMA paired-threads data must still deploy a VM."""
        # NOTE(review): the original carried a mangled "' paired-threads'" key
        # (leading space); 'paired-threads' is the intended descriptor field
        flavor_data = {'extended': {'numas': [{'paired-threads-id': [['1', '3'], ['2', '4']],
                                               'paired-threads': 2, 'memory': 1}]},
                       'ram': 1024, 'vcpus': 1, 'disk': 10}
        name = "eth10"

        # create new flavor
        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)

        # find image name and image id
        image_id = get_image_id()

        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
                                                           self.__class__.test_index,
                                                           inspect.currentframe().f_code.co_name)
        self.__class__.test_index += 1

        net_list = [{'use': 'bridge', 'name': name, 'floating_ip': False, 'port_security': True,
                     'type': 'virtual', 'net_id': self.network_id}]

        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
                                                                flavor_id=flavor_id, net_list=net_list)

        # idiomatic type check (was: assertEqual(type(instance_id), str))
        self.assertIsInstance(instance_id, str)
        logger.info("Deleting created vm instance")
        test_config["vim_conn"].delete_vminstance(instance_id)
        time.sleep(10)
-
-
-'''
-IMPORTANT NOTE
-The following unittest class does not have the 'test_' on purpose. This test is the one used for the
-scenario based tests.
-'''
class descriptor_based_scenario_test(test_base):
    """Scenario lifecycle test driven by descriptor files on disk.

    Loads VNFD/NSD YAML files from the configured test folder, creates the
    VNFs and the scenario, instantiates it, waits for it to become active and
    finally tears everything down in reverse creation order.
    """
    test_index = 0
    scenario_test_path = None  # folder containing the descriptor *.yaml files

    @classmethod
    def setUpClass(cls):
        cls.test_index = 1
        cls.to_delete_list = []            # cleanup actions, newest first
        cls.scenario_uuids = []
        cls.instance_scenario_uuids = []
        cls.scenario_test_path = test_config["test_directory"] + '/' + test_config["test_folder"]
        logger.info("{}. {} {}".format(test_config["test_number"], cls.__name__, test_config["test_folder"]))

    @classmethod
    def tearDownClass(cls):
        test_config["test_number"] += 1

    def test_000_load_scenario(self):
        """Load every descriptor in the test folder and create VNFs/scenario."""
        self.__class__.test_text = "{}.{}. TEST {} {}".format(test_config["test_number"], self.__class__.test_index,
                                                              inspect.currentframe().f_code.co_name,
                                                              test_config["test_folder"])
        self.__class__.test_index += 1
        # load VNFD and NSD
        descriptor_files = glob.glob(self.__class__.scenario_test_path + '/*.yaml')
        vnf_descriptors = []
        scenario_descriptors = []
        for descriptor_file in descriptor_files:
            with open(descriptor_file, 'r') as stream:
                # safe_load: descriptors are plain data; also avoids the
                # py3 yaml.load() Loader deprecation warning
                descriptor = yaml.safe_load(stream)
                if "vnf" in descriptor or "vnfd:vnfd-catalog" in descriptor or "vnfd-catalog" in descriptor:
                    vnf_descriptors.append(descriptor)
                else:
                    scenario_descriptors.append(descriptor)

        scenario_file = glob.glob(self.__class__.scenario_test_path + '/scenario_*.yaml')
        if not vnf_descriptors or not scenario_descriptors or len(scenario_descriptors) > 1:
            raise Exception("Test '{}' not valid. It must contain an scenario file and at least one vnfd file'".format(
                test_config["test_folder"]))

        # load all vnfd
        for vnf_descriptor in vnf_descriptors:
            logger.debug("VNF descriptor: {}".format(vnf_descriptor))
            vnf = test_config["client"].create_vnf(descriptor=vnf_descriptor, image_name=test_config["image_name"])
            logger.debug(vnf)
            # response key differs between classic ('vnf') and OSM IM ('vnfd')
            if 'vnf' in vnf:
                vnf_uuid = vnf['vnf']['uuid']
            else:
                vnf_uuid = vnf['vnfd'][0]['uuid']
            self.__class__.to_delete_list.insert(0, {"item": "vnf", "function": test_config["client"].delete_vnf,
                                                     "params": {"uuid": vnf_uuid}})

        # load the scenario definition
        for scenario_descriptor in scenario_descriptors:
            # networks = scenario_descriptor['scenario']['networks']
            # networks[test_config["mgmt_net"]] = networks.pop('mgmt')
            logger.debug("Scenario descriptor: {}".format(scenario_descriptor))
            scenario = test_config["client"].create_scenario(descriptor=scenario_descriptor)
            logger.debug(scenario)
            if 'scenario' in scenario:
                scenario_uuid = scenario['scenario']['uuid']
            else:
                scenario_uuid = scenario['nsd'][0]['uuid']
            self.__class__.to_delete_list.insert(0, {"item": "scenario",
                                                     "function": test_config["client"].delete_scenario,
                                                     "params": {"uuid": scenario_uuid}})
            self.__class__.scenario_uuids.append(scenario_uuid)

    def test_010_instantiate_scenario(self):
        """Instantiate every loaded scenario, mapping mgmt to the real net."""
        self.__class__.test_text = "{}.{}. TEST {} {}".format(test_config["test_number"], self.__class__.test_index,
                                                              inspect.currentframe().f_code.co_name,
                                                              test_config["test_folder"])
        self.__class__.test_index += 1
        for scenario_uuid in self.__class__.scenario_uuids:
            instance_descriptor = {
                "instance": {
                    "name": self.__class__.test_text,
                    "scenario": scenario_uuid,
                    "networks": {
                        "mgmt": {"sites": [{"netmap-use": test_config["mgmt_net"]}]}
                    }
                }
            }
            instance = test_config["client"].create_instance(instance_descriptor)
            self.__class__.instance_scenario_uuids.append(instance['uuid'])
            logger.debug(instance)
            self.__class__.to_delete_list.insert(0, {"item": "instance",
                                                     "function": test_config["client"].delete_instance,
                                                     "params": {"uuid": instance['uuid']}})

    def test_020_check_deployent(self):
        """Poll until every instance scenario is active or timeout expires."""
        self.__class__.test_text = "{}.{}. TEST {} {}".format(test_config["test_number"], self.__class__.test_index,
                                                              inspect.currentframe().f_code.co_name,
                                                              test_config["test_folder"])
        self.__class__.test_index += 1

        if test_config["manual"]:
            # python3: raw_input() was renamed to input()
            input('Scenario has been deployed. Perform manual check and press any key to resume')
            return

        keep_waiting = test_config["timeout"]
        pending_instance_scenario_uuids = list(self.__class__.instance_scenario_uuids)   # make a copy
        while pending_instance_scenario_uuids:
            index = 0
            while index < len(pending_instance_scenario_uuids):
                result = check_instance_scenario_active(pending_instance_scenario_uuids[index])
                if result[0]:
                    del pending_instance_scenario_uuids[index]
                    break
                elif 'ERROR' in result[1]:
                    msg = 'Got error while waiting for the instance to get active: ' + result[1]
                    logging.error(msg)
                    raise Exception(msg)
                index += 1

            if keep_waiting >= 5:
                time.sleep(5)
                keep_waiting -= 5
            elif keep_waiting > 0:
                time.sleep(keep_waiting)
                keep_waiting = 0
            else:
                msg = 'Timeout reached while waiting instance scenario to get active'
                logging.error(msg)
                raise Exception(msg)

    def test_030_clean_deployment(self):
        """Delete instances, scenarios and VNFs in reverse creation order."""
        self.__class__.test_text = "{}.{}. TEST {} {}".format(test_config["test_number"], self.__class__.test_index,
                                                              inspect.currentframe().f_code.co_name,
                                                              test_config["test_folder"])
        self.__class__.test_index += 1
        # At the moment if you delete a scenario right after creating it, in openstack datacenters
        # sometimes scenario ports get orphaned. This sleep is just a dirty workaround
        time.sleep(5)
        for item in self.__class__.to_delete_list:
            response = item["function"](**item["params"])
            logger.debug(response)
-
-
-def _get_random_string(maxLength):
-    '''generates a string with random characters string.letters and string.digits
-    with a random length up to maxLength characters. If maxLength is <15 it will be changed automatically to 15
-    '''
-    prefix = 'testing_'
-    min_string = 15
-    minLength = min_string - len(prefix)
-    if maxLength < min_string: maxLength = min_string
-    maxLength -= len(prefix)
-    length = random.randint(minLength,maxLength)
-    return 'testing_'+"".join([random.choice(string.letters+string.digits) for i in xrange(length)])
-
-
-def test_vimconnector(args):
-    global test_config
-    sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + "/osm_ro")
-    test_config['vimtype'] = args.vimtype
-    if args.vimtype == "vmware":
-        import vimconn_vmware as vim
-
-        test_config["test_directory"] = os.path.dirname(__file__) + "/RO_tests"
-
-        tenant_name = args.tenant_name
-        test_config['tenant'] = tenant_name
-        config_params = yaml.load(args.config_param)
-        org_name = config_params.get('orgname')
-        org_user = config_params.get('user')
-        org_passwd = config_params.get('passwd')
-        vim_url = args.endpoint_url
-        test_config['image_path'] = args.image_path
-        test_config['image_name'] = args.image_name
-        test_config['sriov_net_name'] = args.sriov_net_name
-
-        # vmware connector obj
-        test_config['vim_conn'] = vim.vimconnector(name=org_name, tenant_name=tenant_name, user=org_user,passwd=org_passwd, url=vim_url, config=config_params)
-
-    elif args.vimtype == "aws":
-        import vimconn_aws as vim
-    elif args.vimtype == "openstack":
-        import vimconn_openstack as vim
-
-        test_config["test_directory"] = os.path.dirname(__file__) + "/RO_tests"
-
-        tenant_name = args.tenant_name
-        test_config['tenant'] = tenant_name
-        config_params = yaml.load(args.config_param)
-        os_user = config_params.get('user')
-        os_passwd = config_params.get('passwd')
-        vim_url = args.endpoint_url
-        test_config['image_path'] = args.image_path
-        test_config['image_name'] = args.image_name
-        test_config['sriov_net_name'] = args.sriov_net_name
-
-        # openstack connector obj
-        vim_persistent_info = {}
-        test_config['vim_conn'] = vim.vimconnector(
-            uuid="test-uuid-1", name="VIO-openstack",
-            tenant_id=None, tenant_name=tenant_name,
-            url=vim_url, url_admin=None,
-            user=os_user, passwd=os_passwd,
-            config=config_params, persistent_info=vim_persistent_info
-        )
-        test_config['vim_conn'].debug = "true"
-
-    elif args.vimtype == "openvim":
-        import vimconn_openvim as vim
-    else:
-        logger.critical("vimtype '{}' not supported".format(args.vimtype))
-        sys.exit(1)
-    executed = 0
-    failed = 0
-    clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)
-    # If only want to obtain a tests list print it and exit
-    if args.list_tests:
-        tests_names = []
-        for cls in clsmembers:
-            if cls[0].startswith('test_vimconn'):
-                tests_names.append(cls[0])
-
-        msg = "The 'vim' set tests are:\n\t" + ', '.join(sorted(tests_names))
-        print(msg)
-        logger.info(msg)
-        sys.exit(0)
-
-    # Create the list of tests to be run
-    code_based_tests = []
-    if args.tests:
-        for test in args.tests:
-            for t in test.split(','):
-                matches_code_based_tests = [item for item in clsmembers if item[0] == t]
-                if len(matches_code_based_tests) > 0:
-                    code_based_tests.append(matches_code_based_tests[0][1])
-                else:
-                    logger.critical("Test '{}' is not among the possible ones".format(t))
-                    sys.exit(1)
-    if not code_based_tests:
-        # include all tests
-        for cls in clsmembers:
-            # We exclude 'test_VIM_tenant_operations' unless it is specifically requested by the user
-            if cls[0].startswith('test_vimconn'):
-                code_based_tests.append(cls[1])
-
-    logger.debug("tests to be executed: {}".format(code_based_tests))
-
-    # TextTestRunner stream is set to /dev/null in order to avoid the method to directly print the result of tests.
-    # This is handled in the tests using logging.
-    stream = open('/dev/null', 'w')
-
-    # Run code based tests
-    basic_tests_suite = unittest.TestSuite()
-    for test in code_based_tests:
-        basic_tests_suite.addTest(unittest.makeSuite(test))
-    result = unittest.TextTestRunner(stream=stream, failfast=failfast).run(basic_tests_suite)
-    executed += result.testsRun
-    failed += len(result.failures) + len(result.errors)
-    if failfast and failed:
-        sys.exit(1)
-    if len(result.failures) > 0:
-        logger.debug("failures : {}".format(result.failures))
-    if len(result.errors) > 0:
-        logger.debug("errors : {}".format(result.errors))
-    return executed, failed
-
-
-def test_vim(args):
-    global test_config
-    sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + "/osm_ro")
-    import openmanoclient
-    executed = 0
-    failed = 0
-    test_config["client"] = openmanoclient.openmanoclient(
-        endpoint_url=args.endpoint_url,
-        tenant_name=args.tenant_name,
-        datacenter_name=args.datacenter,
-        debug=args.debug, logger=test_config["logger_name"])
-    clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)
-    # If only want to obtain a tests list print it and exit
-    if args.list_tests:
-        tests_names = []
-        for cls in clsmembers:
-            if cls[0].startswith('test_VIM'):
-                tests_names.append(cls[0])
-
-        msg = "The 'vim' set tests are:\n\t" + ', '.join(sorted(tests_names)) +\
-              "\nNOTE: The test test_VIM_tenant_operations will fail in case the used datacenter is type OpenStack " \
-              "unless RO has access to the admin endpoint. Therefore this test is excluded by default"
-        print(msg)
-        logger.info(msg)
-        sys.exit(0)
-
-    # Create the list of tests to be run
-    code_based_tests = []
-    if args.tests:
-        for test in args.tests:
-            for t in test.split(','):
-                matches_code_based_tests = [item for item in clsmembers if item[0] == t]
-                if len(matches_code_based_tests) > 0:
-                    code_based_tests.append(matches_code_based_tests[0][1])
-                else:
-                    logger.critical("Test '{}' is not among the possible ones".format(t))
-                    sys.exit(1)
-    if not code_based_tests:
-        # include all tests
-        for cls in clsmembers:
-            # We exclude 'test_VIM_tenant_operations' unless it is specifically requested by the user
-            if cls[0].startswith('test_VIM') and cls[0] != 'test_VIM_tenant_operations':
-                code_based_tests.append(cls[1])
-
-    logger.debug("tests to be executed: {}".format(code_based_tests))
-
-    # TextTestRunner stream is set to /dev/null in order to avoid the method to directly print the result of tests.
-    # This is handled in the tests using logging.
-    stream = open('/dev/null', 'w')
-
-    # Run code based tests
-    basic_tests_suite = unittest.TestSuite()
-    for test in code_based_tests:
-        basic_tests_suite.addTest(unittest.makeSuite(test))
-    result = unittest.TextTestRunner(stream=stream, failfast=failfast).run(basic_tests_suite)
-    executed += result.testsRun
-    failed += len(result.failures) + len(result.errors)
-    if failfast and failed:
-        sys.exit(1)
-    if len(result.failures) > 0:
-        logger.debug("failures : {}".format(result.failures))
-    if len(result.errors) > 0:
-        logger.debug("errors : {}".format(result.errors))
-    return executed, failed
-
-
-def test_wim(args):
-    global test_config
-    sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + "/osm_ro")
-    import openmanoclient
-    executed = 0
-    failed = 0
-    test_config["client"] = openmanoclient.openmanoclient(
-        endpoint_url=args.endpoint_url,
-        tenant_name=args.tenant_name,
-        datacenter_name=args.datacenter,
-        debug=args.debug, logger=test_config["logger_name"])
-    clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)
-    # If only want to obtain a tests list print it and exit
-    if args.list_tests:
-        tests_names = []
-        for cls in clsmembers:
-            if cls[0].startswith('test_WIM'):
-                tests_names.append(cls[0])
-
-        msg = "The 'wim' set tests are:\n\t" + ', '.join(sorted(tests_names)) +\
-              "\nNOTE: The test test_VIM_tenant_operations will fail in case the used datacenter is type OpenStack " \
-              "unless RO has access to the admin endpoint. Therefore this test is excluded by default"
-        print(msg)
-        logger.info(msg)
-        sys.exit(0)
-
-    # Create the list of tests to be run
-    code_based_tests = []
-    if args.tests:
-        for test in args.tests:
-            for t in test.split(','):
-                matches_code_based_tests = [item for item in clsmembers if item[0] == t]
-                if len(matches_code_based_tests) > 0:
-                    code_based_tests.append(matches_code_based_tests[0][1])
-                else:
-                    logger.critical("Test '{}' is not among the possible ones".format(t))
-                    sys.exit(1)
-    if not code_based_tests:
-        # include all tests
-        for cls in clsmembers:
-            # We exclude 'test_VIM_tenant_operations' unless it is specifically requested by the user
-            if cls[0].startswith('test_VIM') and cls[0] != 'test_VIM_tenant_operations':
-                code_based_tests.append(cls[1])
-
-    logger.debug("tests to be executed: {}".format(code_based_tests))
-
-    # TextTestRunner stream is set to /dev/null in order to avoid the method to directly print the result of tests.
-    # This is handled in the tests using logging.
-    stream = open('/dev/null', 'w')
-
-    # Run code based tests
-    basic_tests_suite = unittest.TestSuite()
-    for test in code_based_tests:
-        basic_tests_suite.addTest(unittest.makeSuite(test))
-    result = unittest.TextTestRunner(stream=stream, failfast=failfast).run(basic_tests_suite)
-    executed += result.testsRun
-    failed += len(result.failures) + len(result.errors)
-    if failfast and failed:
-        sys.exit(1)
-    if len(result.failures) > 0:
-        logger.debug("failures : {}".format(result.failures))
-    if len(result.errors) > 0:
-        logger.debug("errors : {}".format(result.errors))
-    return executed, failed
-
-
-def test_deploy(args):
-    global test_config
-    sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + "/osm_ro")
-    import openmanoclient
-    executed = 0
-    failed = 0
-    test_config["test_directory"] = os.path.dirname(__file__) + "/RO_tests"
-    test_config["image_name"] = args.image_name
-    test_config["mgmt_net"] = args.mgmt_net
-    test_config["manual"] = args.manual
-    test_directory_content = os.listdir(test_config["test_directory"])
-    # If only want to obtain a tests list print it and exit
-    if args.list_tests:
-        msg = "the 'deploy' set tests are:\n\t" + ', '.join(sorted(test_directory_content))
-        print(msg)
-        # logger.info(msg)
-        sys.exit(0)
-
-    descriptor_based_tests = []
-    # Create the list of tests to be run
-    code_based_tests = []
-    if args.tests:
-        for test in args.tests:
-            for t in test.split(','):
-                if t in test_directory_content:
-                    descriptor_based_tests.append(t)
-                else:
-                    logger.critical("Test '{}' is not among the possible ones".format(t))
-                    sys.exit(1)
-    if not descriptor_based_tests:
-        # include all tests
-        descriptor_based_tests = test_directory_content
-
-    logger.debug("tests to be executed: {}".format(code_based_tests))
-
-    # import openmanoclient from relative path
-    test_config["client"] = openmanoclient.openmanoclient(
-        endpoint_url=args.endpoint_url,
-        tenant_name=args.tenant_name,
-        datacenter_name=args.datacenter,
-        debug=args.debug, logger=test_config["logger_name"])
-
-    # TextTestRunner stream is set to /dev/null in order to avoid the method to directly print the result of tests.
-    # This is handled in the tests using logging.
-    stream = open('/dev/null', 'w')
-    # This scenario based tests are defined as directories inside the directory defined in 'test_directory'
-    for test in descriptor_based_tests:
-        test_config["test_folder"] = test
-        test_suite = unittest.TestSuite()
-        test_suite.addTest(unittest.makeSuite(descriptor_based_scenario_test))
-        result = unittest.TextTestRunner(stream=stream, failfast=False).run(test_suite)
-        executed += result.testsRun
-        failed += len(result.failures) + len(result.errors)
-        if failfast and failed:
-            sys.exit(1)
-        if len(result.failures) > 0:
-            logger.debug("failures : {}".format(result.failures))
-        if len(result.errors) > 0:
-            logger.debug("errors : {}".format(result.errors))
-
-    return executed, failed
-
-if __name__=="__main__":
-
-    parser = ArgumentParser(description='Test RO module')
-    parser.add_argument('-v','--version', action='version', help="Show current version",
-                             version='%(prog)s version ' + __version__  + ' ' + version_date)
-
-    # Common parameters
-    parent_parser = ArgumentParser(add_help=False)
-    parent_parser.add_argument('--failfast', help='Stop when a test fails rather than execute all tests',
-                      dest='failfast', action="store_true", default=False)
-    parent_parser.add_argument('--failed', help='Set logs to show only failed tests. --debug disables this option',
-                      dest='failed', action="store_true", default=False)
-    default_logger_file = os.path.dirname(__file__)+'/'+os.path.splitext(os.path.basename(__file__))[0]+'.log'
-    parent_parser.add_argument('--list-tests', help='List all available tests', dest='list_tests', action="store_true",
-                      default=False)
-    parent_parser.add_argument('--logger_file', dest='logger_file', default=default_logger_file,
-                               help='Set the logger file. By default '+default_logger_file)
-    parent_parser.add_argument("-t", '--tenant', dest='tenant_name', default="osm",
-                               help="Set the openmano tenant to use for the test. By default 'osm'")
-    parent_parser.add_argument('--debug', help='Set logs to debug level', dest='debug', action="store_true")
-    parent_parser.add_argument('--timeout', help='Specify the instantiation timeout in seconds. By default 300',
-                          dest='timeout', type=int, default=300)
-    parent_parser.add_argument('--test', '--tests', help='Specify the tests to run', dest='tests', action="append")
-
-    subparsers = parser.add_subparsers(help='test sets')
-
-    # Deployment test set
-    # -------------------
-    deploy_parser = subparsers.add_parser('deploy', parents=[parent_parser],
-                                          help="test deployment using descriptors at RO_test folder ")
-    deploy_parser.set_defaults(func=test_deploy)
-
-    # Mandatory arguments
-    mandatory_arguments = deploy_parser.add_argument_group('mandatory arguments')
-    mandatory_arguments.add_argument('-d', '--datacenter', required=True, help='Set the datacenter to test')
-    mandatory_arguments.add_argument("-i", '--image-name', required=True, dest="image_name",
-                                     help='Image name available at datacenter used for the tests')
-    mandatory_arguments.add_argument("-n", '--mgmt-net-name', required=True, dest='mgmt_net',
-                                     help='Set the vim management network to use for tests')
-
-    # Optional arguments
-    deploy_parser.add_argument('-m', '--manual-check', dest='manual', action="store_true", default=False,
-                               help='Pause execution once deployed to allow manual checking of the '
-                                    'deployed instance scenario')
-    deploy_parser.add_argument('-u', '--url', dest='endpoint_url', default='http://localhost:9090/openmano',
-                               help="Set the openmano server url. By default 'http://localhost:9090/openmano'")
-
-    # Vimconn test set
-    # -------------------
-    vimconn_parser = subparsers.add_parser('vimconn', parents=[parent_parser], help="test vimconnector plugin")
-    vimconn_parser.set_defaults(func=test_vimconnector)
-    # Mandatory arguments
-    mandatory_arguments = vimconn_parser.add_argument_group('mandatory arguments')
-    mandatory_arguments.add_argument('--vimtype', choices=['vmware', 'aws', 'openstack', 'openvim'], required=True,
-                                     help='Set the vimconnector type to test')
-    mandatory_arguments.add_argument('-c', '--config', dest='config_param', required=True,
-                                    help='Set the vimconnector specific config parameters in dictionary format')
-    mandatory_arguments.add_argument('-u', '--url', dest='endpoint_url',required=True, help="Set the vim connector url or Host IP")
-    # Optional arguments
-    vimconn_parser.add_argument('-i', '--image-path', dest='image_path', help="Provide image path present at RO container")
-    vimconn_parser.add_argument('-n', '--image-name', dest='image_name', help="Provide image name for test")
-    # TODO add optional arguments for vimconn tests
-    # vimconn_parser.add_argument("-i", '--image-name', dest='image_name', help='<HELP>'))
-    vimconn_parser.add_argument('-s', '--sriov-net-name', dest='sriov_net_name', help="Provide SRIOV network name for test")
-
-    # Datacenter test set
-    # -------------------
-    vimconn_parser = subparsers.add_parser('vim', parents=[parent_parser], help="test vim")
-    vimconn_parser.set_defaults(func=test_vim)
-
-    # Mandatory arguments
-    mandatory_arguments = vimconn_parser.add_argument_group('mandatory arguments')
-    mandatory_arguments.add_argument('-d', '--datacenter', required=True, help='Set the datacenter to test')
-
-    # Optional arguments
-    vimconn_parser.add_argument('-u', '--url', dest='endpoint_url', default='http://localhost:9090/openmano',
-                               help="Set the openmano server url. By default 'http://localhost:9090/openmano'")
-
-    # WIM test set
-    # -------------------
-    vimconn_parser = subparsers.add_parser('wim', parents=[parent_parser], help="test wim")
-    vimconn_parser.set_defaults(func=test_wim)
-
-    # Mandatory arguments
-    mandatory_arguments = vimconn_parser.add_argument_group('mandatory arguments')
-    mandatory_arguments.add_argument('-d', '--datacenter', required=True, help='Set the datacenter to test')
-
-    # Optional arguments
-    vimconn_parser.add_argument('-u', '--url', dest='endpoint_url', default='http://localhost:9090/openmano',
-                                help="Set the openmano server url. By default 'http://localhost:9090/openmano'")
-
-    argcomplete.autocomplete(parser)
-    args = parser.parse_args()
-    # print str(args)
-    test_config = {}
-
-    # default logger level is INFO. Options --debug and --failed override this, being --debug prioritary
-    logger_level = 'INFO'
-    if args.debug:
-        logger_level = 'DEBUG'
-    elif args.failed:
-        logger_level = 'WARNING'
-    logger_name = os.path.basename(__file__)
-    test_config["logger_name"] = logger_name
-    logger = logging.getLogger(logger_name)
-    logger.setLevel(logger_level)
-    failfast = args.failfast
-
-    # Configure a logging handler to store in a logging file
-    if args.logger_file:
-        fileHandler = logging.FileHandler(args.logger_file)
-        formatter_fileHandler = logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s')
-        fileHandler.setFormatter(formatter_fileHandler)
-        logger.addHandler(fileHandler)
-
-    # Configure a handler to print to stdout
-    consoleHandler = logging.StreamHandler(sys.stdout)
-    formatter_consoleHandler = logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s')
-    consoleHandler.setFormatter(formatter_consoleHandler)
-    logger.addHandler(consoleHandler)
-
-    logger.debug('Program started with the following arguments: ' + str(args))
-
-    # set test config parameters
-    test_config["timeout"] = args.timeout
-    test_config["test_number"] = 1
-
-    executed, failed = args.func(args)
-
-    # Log summary
-    logger.warning("Total number of tests: {}; Total number of failures/errors: {}".format(executed, failed))
-    sys.exit(1 if failed else 0)
diff --git a/test/test_on_container.sh b/test/test_on_container.sh
deleted file mode 100755 (executable)
index e3400b0..0000000
+++ /dev/null
@@ -1,177 +0,0 @@
-#!/bin/bash
-
-##
-# Copyright 2017 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of OSM
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-##
-
-# Author: Alfonso Tierno (alfonso.tiernosepulveda@telefonica.com)
-
-description="It creates a new lxc container, installs RO from a concrete commit and executes validation tests.\
- An openvim in test mode is installed and used to validate"
-
-usage(){
-    echo -e "usage: ${BASH_SOURCE[0]} CONTAINER\n ${description}"
-    echo -e "  CONTAINER is the name of the container to be created. By default test1"\
-            "Warning! if a container with the same name exists, it will be deleted"
-    echo -e "  You must also supply at TEST_RO_COMMIT envioronmental variable with the git command"\
-            "to clone the version under test. It can be copy paste from gerrit. Examples:\n"\
-            " TEST_RO_COMMIT='git fetch https://osm.etsi.org/gerrit/osm/RO refs/changes/40/5540/1 && git checkout FETCH_HEAD'\n"\
-            " TEST_RO_COMMIT='git checkout v3.0.1'"
-    echo -e "  You can provide TEST_RO_GIT_URL, by default https://osm.etsi.org/gerrit/osm/RO is used"
-    echo -e "  You can provide TEST_RO_CONTAINER instead of by parameter, by default test1"
-    echo -e "  You can provide TEST_RO_CUSTOM, with a command for container customization, by default nothing."
-}
-
-[ "$1" = "--help" ] || [ "$1" = "-h" ] && usage && exit 0
-
-[[ -z "$TEST_RO_COMMIT" ]] && echo 'provide a TEST_RO_COMMIT variable. Type --help for more info' >&2 && exit 1
-[[ -z "$TEST_RO_GIT_URL" ]] && TEST_RO_GIT_URL="https://osm.etsi.org/gerrit/osm/RO"
-
-[ -n "$1" ] && TEST_RO_CONTAINER="$1"
-[[ -z "$TEST_RO_CONTAINER" ]] && TEST_RO_CONTAINER=test1
-
-instance_name=3vdu_2vnf
-
-function echo_RO_log(){
-    # echo "LOG DUMP:" >&2 && lxc exec "$TEST_RO_CONTAINER" -- tail -n 150 /var/log/osm/openmano.log >&2
-    echo -e "\nFAILED" >&2
-}
-
-function lxc_exec(){
-    if ! lxc exec "$TEST_RO_CONTAINER" --env OPENMANO_TENANT=osm --env OPENMANO_DATACENTER=local-openvim \
-        --env OPENVIM_TENANT="$OPENVIM_TENANT" -- bash -c "$*"
-    then
-        echo "ERROR on command '$*'" >&2
-        echo_RO_log
-        exit 1
-    fi
-}
-
-function wait_until_deleted(){
-    wait_active=0
-    while lxc_exec RO/test/local/openvim/openvim vm-list | grep -q -e ${instance_name} ||
-          lxc_exec RO/test/local/openvim/openvim net-list | grep -q -e ${instance_name}
-    do
-        echo -n "."
-        [ $wait_active -gt 90 ] &&  echo "timeout waiting VM and nets deleted at VIM" >&2 && echo_RO_log && exit 1
-        wait_active=$((wait_active + 1))
-        sleep 1
-    done
-    echo
-}
-
-lxc delete "$TEST_RO_CONTAINER" --force 2>/dev/null && echo "container '$TEST_RO_CONTAINER' deleted"
-lxc launch ubuntu:16.04 "$TEST_RO_CONTAINER"
-sleep 10
-[[ -n "$TEST_RO_CUSTOM" ]] && ${TEST_RO_CUSTOM}
-lxc_exec ifconfig eth0 mtu 1446  # Avoid problems when inside an openstack VM that normally limit MTU do this value
-lxc_exec git clone "$TEST_RO_GIT_URL"
-lxc_exec git -C RO status
-lxc_exec "cd RO && $TEST_RO_COMMIT"
-
-# TEST INSTALL
-lxc_exec RO/scripts/install-openmano.sh --noclone --force -q --updatedb -b master
-sleep 10
-lxc_exec openmano tenant-create osm
-lxc_exec openmano tenant-list
-
-# TEST database migration
-lxc_exec ./RO/database_utils/migrate_mano_db.sh 20
-lxc_exec ./RO/database_utils/migrate_mano_db.sh
-lxc_exec ./RO/database_utils/migrate_mano_db.sh 20
-lxc_exec ./RO/database_utils/migrate_mano_db.sh
-
-# TEST instantiate with a fake local openvim
-lxc_exec ./RO/test/basictest.sh -f --insert-bashrc --install-openvim reset add-openvim create delete
-
-
-# TEST instantiate with a fake local openvim 2
-lxc_exec ./RO/test/test_RO.py deploy -n mgmt -t osm -i cirros034 -d local-openvim --timeout=30 --failfast
-lxc_exec ./RO/test/test_RO.py vim  -t osm  -d local-openvim --timeout=30 --failfast
-
-sleep 10
-echo "TEST service restart in the middle of a instantiation/deletion"
-OPENVIM_TENANT=`lxc_exec RO/test/local/openvim/openvim tenant-list`
-OPENVIM_TENANT=${OPENVIM_TENANT%% *}
-
-lxc_exec openmano vnf-create RO/vnfs/examples/v3_3vdu_vnfd.yaml --image-name=cirros034
-lxc_exec openmano scenario-create RO/scenarios/examples/v3_3vdu_2vnf_nsd.yaml
-wait_until_deleted
-test_number=0
-while [ $test_number -lt 5 ] ; do
-    echo test ${test_number}.0 test instantiation recovering
-    lxc_exec openmano instance-scenario-create --name ${instance_name} --scenario osm_id=3vdu_2vnf_nsd";"service osm-ro stop
-    sleep 5
-    lxc_exec service osm-ro start
-    sleep 10
-    # wait until all VM are active
-    wait_active=0
-    while [ `lxc_exec openmano instance-scenario-list ${instance_name} | grep ACTIVE | wc -l` -lt 7 ] ; do
-        echo -n "."
-        [ $wait_active -gt 90 ] &&  echo "timeout waiting VM active" >&2 && echo_RO_log && exit 1
-        wait_active=$((wait_active + 1))
-        sleep 1
-    done
-    echo
-
-    # Due to race condition the VIM request can be processed without getting the response by RO
-    # resulting in having some VM or net at VIM not registered by RO. If this is the case need to be deleted manually
-    vim_vms=`lxc_exec RO/test/local/openvim/openvim vm-list | grep ${instance_name} | awk '{print $1}'`
-    for vim_vm in $vim_vms ; do
-        if ! lxc_exec openmano instance-scenario-list ${instance_name} | grep -q $vim_vm ; then
-            echo deleting VIM vm $vim_vm
-            lxc_exec RO/test/local/openvim/openvim vm-delete -f $vim_vm
-        fi
-    done
-    vim_nets=`lxc_exec RO/test/local/openvim/openvim net-list | grep ${instance_name} | awk '{print $1}'`
-    for vim_net in $vim_nets ; do
-        if ! lxc_exec openmano instance-scenario-list ${instance_name} | grep -q $vim_net ; then
-            echo deleting VIM net $vim_net
-            lxc_exec RO/test/local/openvim/openvim net-delete -f $vim_net
-        fi
-    done
-
-    # delete first VIM VM and wait until RO detects it
-    echo test ${test_number}.1 test refresh VM VIM status deleted
-    OPENVIM_VM=`lxc_exec RO/test/local/openvim/openvim vm-list`
-    OPENVIM_VM=${OPENVIM_VM%% *}
-    lxc_exec RO/test/local/openvim/openvim vm-delete -f $OPENVIM_VM
-    wait_active=0
-    while ! lxc_exec openmano instance-scenario-list ${instance_name} | grep -q DELETED ; do
-        echo -n "."
-        [ $wait_active -gt 90 ] &&  echo "timeout waiting RO get VM status as DELETED" >&2 && echo_RO_log && exit 1
-        wait_active=$((wait_active + 1))
-        sleep 1
-        ACTIVE=`lxc_exec openmano instance-scenario-list ${instance_name} | grep ACTIVE | wc -l`
-    done
-    echo
-
-    # TEST service restart in the middle of a instantiation deletion
-    echo test ${test_number}.2 test isntantiation deletion recovering
-    lxc_exec openmano instance-scenario-delete ${instance_name} -f";"service osm-ro stop
-    sleep 5
-    lxc_exec service osm-ro start
-    sleep 10
-    # wait until all VM are deteled at VIM
-    wait_until_deleted
-
-    test_number=$((test_number + 1))
-done
-echo "DONE"
-
-
diff --git a/test/test_openmanocli.sh b/test/test_openmanocli.sh
deleted file mode 100755 (executable)
index 0490467..0000000
+++ /dev/null
@@ -1,207 +0,0 @@
-#!/bin/bash
-
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-#This script can be used as a basic test of openmano.
-#WARNING: It destroy the database content
-
-
-function usage(){
-    echo -e "usage: ${BASH_SOURCE[0]} [OPTIONS] <action>\n  test openmano with fake tenant, datancenters, etc."\
-            "It assumes that you have configured openmano cli with HOST,PORT,TENANT with environment variables"
-            "If not, it will use by default localhost:9080 and creates a new TENANT"
-    echo -e "    -h --help        shows this help"
-}
-
-function is_valid_uuid(){
-    echo "$1" | grep -q -E '^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$' && return 0
-    return 1
-}
-
-DIRNAME=$(dirname $(readlink -f ${BASH_SOURCE[0]}))
-DIRmano=$(dirname $DIRNAME)
-DIRscript=${DIRmano}/scripts
-
-#detect paths of executables, preceding the relative paths
-openmano=openmano && [[ -x "${DIRmano}/openmano" ]] && openmano="${DIRmano}/openmano"
-service_openmano=service-openmano && [[ -x "$DIRscript/service-openmano" ]] &&
-    service_openmano="$DIRscript/service-openmano"
-initopenvim="initopenvim"
-openvim="openvim"
-
-function _exit()
-{
-    EXIT_STATUS=$1
-    for item in $ToDelete
-    do
-        command=${item%%:*}
-        uuid=${item#*:}
-        [[ $command == "datacenter-detach" ]] && force="" || force=-f
-        printf "%-50s" "$command $uuid:"
-        ! $openmano $command $uuid $force >> /dev/null && echo FAIL && EXIT_STATUS=1 || echo OK
-     done
-    [[ ${BASH_SOURCE[0]} != $0 ]] && return $1 || exit $EXIT_STATUS
-}
-
-
-# process options
-source ${DIRscript}/get-options.sh "force:-f help:h insert-bashrc init-openvim:initopenvim install-openvim screen" \
-                $* || _exit 1
-
-# help
-[ -n "$option_help" ] && usage && _exit 0
-
-
-ToDelete=""
-DCs="dc-fake1-openstack dc-fake2-openvim" #dc-fake3-vmware
-Ts="fake-tenant1 fake-tenand2"
-SDNs="sdn-fake1-opendaylight sdn-fake2-floodlight sdn-fake3-onos"
-
-for T in $Ts
-do
-    printf "%-50s" "Creating fake tenant '$T':"
-    ! result=`$openmano tenant-create "$T"` && echo FAIL && echo "    $result" && _exit 1
-    tenant=`echo $result |gawk '{print $1}'`
-    ! is_valid_uuid $tenant && echo "FAIL" && echo "    $result" && _exit 1
-    echo $tenant
-    ToDelete="tenant-delete:$tenant $ToDelete"
-    [[ -z "$OPENMANO_TENANT" ]] && export OPENMANO_TENANT=$tenant
-done
-
-index=0
-for DC in $DCs
-do
-    index=$((index+1))
-    printf "%-50s" "Creating datacenter '$DC':"
-    ! result=`$openmano datacenter-create "$DC" "http://$DC/v2.0" --type=${DC##*-} --config='{insecure: True}'` &&
-        echo FAIL && echo "    $result" && _exit 1
-    datacenter=`echo $result |gawk '{print $1}'`
-    ! is_valid_uuid $datacenter && echo "FAIL" && echo "    $result" && _exit 1
-    echo $datacenter
-    eval DC${index}=$datacenter
-    ToDelete="datacenter-delete:$datacenter $ToDelete"
-    [[ -z "$datacenter_empty" ]] && datacenter_empty=datacenter
-
-    printf "%-50s" "Attaching openmano tenant to the datacenter:"
-    ! result=`$openmano datacenter-attach "$DC" --vim-tenant-name=osm --config='{insecure: False}'` &&
-        echo FAIL && echo "    $result" && _exit 1
-    ToDelete="datacenter-detach:$datacenter $ToDelete"
-    echo OK
-done
-
-printf "%-50s" "Datacenter list:"
-! result=`$openmano datacenter-list` &&
-    echo  "FAIL" && echo "    $result" && _exit 1
-for verbose in "" -v -vv -vvv
-do
-    ! result=`$openmano datacenter-list "$DC" $verbose` &&
-        echo  "FAIL" && echo "    $result" && _exit 1
-done
-echo OK
-
-dpid_prefix=55:56:57:58:59:60:61:0
-dpid_sufix=0
-for SDN in $SDNs
-do
-    printf "%-50s" "Creating SDN controller '$SDN':"
-    ! result=`$openmano sdn-controller-create "$SDN" --ip 4.5.6.7 --port 80 --type=${SDN##*-} \
-        --user user --passwd p --dpid=${dpid_prefix}${dpid_sufix}` && echo "FAIL" && echo "    $result" && _exit 1
-    sdn=`echo $result |gawk '{print $1}'`
-    #check a valid uuid is obtained
-    ! is_valid_uuid $sdn && echo "FAIL" && echo "    $result" && _exit 1
-    echo $sdn
-    ToDelete="sdn-controller-delete:$sdn $ToDelete"
-    dpid_sufix=$((dpid_sufix+1))
-
-done
-printf "%-50s" "Edit SDN-controller:"
-for edit in user=u password=p ip=5.6.6.7 port=81 name=name dpid=45:55:54:45:44:44:55:67
-do
-    ! result=`$openmano sdn-controller-edit $sdn -f --"${edit}"` &&
-        echo  "FAIL" && echo "    $result" && _exit 1
-done
-echo OK
-
-printf "%-50s" "SDN-controller list:"
-! result=`$openmano sdn-controller-list` &&
-    echo  "FAIL" && echo "    $result" && _exit 1
-for verbose in "" -v -vv -vvv
-do
-    ! result=`$openmano sdn-controller-list "$sdn" $verbose` &&
-        echo  "FAIL" && echo "    $result" && _exit 1
-done
-echo OK
-
-printf "%-50s" "Add sdn to datacenter:"
-! result=`$openmano datacenter-edit -f $DC --sdn-controller $SDN` &&
-    echo "FAIL" && echo "    $result" && _exit 1 || echo OK
-
-printf "%-50s" "Clear Port mapping:"
-! result=`$openmano datacenter-sdn-port-mapping-clear -f $DC` &&
-    echo "FAIL" && echo "    $result" && _exit 1 || echo OK
-
-printf "%-50s" "Set Port mapping:"
-! result=`$openmano datacenter-sdn-port-mapping-set -f $DC ${DIRmano}/sdn/sdn_port_mapping.yaml` &&
-    echo "FAIL" && echo "    $result" && _exit 1 || echo OK
-
-printf "%-50s" "List Port mapping:"
-for verbose in "" -v -vv -vvv
-do
-    ! result=`$openmano datacenter-sdn-port-mapping-list "$DC" $verbose` &&
-        echo  "FAIL" && echo "    $result" && _exit 1
-done
-echo OK
-
-printf "%-50s" "Set again Port mapping:"
-! result=`$openmano datacenter-sdn-port-mapping-set -f $DC ${DIRmano}/sdn/sdn_port_mapping.yaml` &&
-    echo "FAIL" && echo "    $result" && _exit 1 || echo OK
-
-printf "%-50s" "Clear again Port mapping:"
-! result=`$openmano datacenter-sdn-port-mapping-clear -f $DC` &&
-    echo "FAIL" && echo "    $result" && _exit 1 || echo OK
-
-printf "%-50s" "Set again Port mapping:"
-! result=`$openmano datacenter-sdn-port-mapping-set -f $DC ${DIRmano}/sdn/sdn_port_mapping.yaml` &&
-    echo "FAIL" && echo "    $result" && _exit 1 || echo OK
-
-printf "%-50s" "Remove datacenter sdn:"
-! result=`$openmano datacenter-edit -f $DC --sdn-controller null` &&
-    echo "FAIL" && echo "    $result" && _exit 1 || echo OK
-
-printf "%-50s" "Negative list port mapping:"
-result=`$openmano datacenter-sdn-port-mapping-list $DC` &&
-    echo "FAIL" && echo "    $result" && _exit 1 || echo OK
-
-printf "%-50s" "Add again datacenter sdn:"
-! result=`$openmano datacenter-edit -f $DC --sdn-controller $SDN` &&
-    echo "FAIL" && echo "    $result" && _exit 1 || echo OK
-
-printf "%-50s" "Empty list port mapping:"
-! [[ `$openmano datacenter-sdn-port-mapping-list $DC | wc -l` -eq 6 ]] &&
-    echo "FAIL" && _exit 1 || echo OK
-
-printf "%-50s" "Set again Port mapping:"
-! result=`$openmano datacenter-sdn-port-mapping-set -f $DC ${DIRmano}/sdn/sdn_port_mapping.yaml` &&
-    echo "FAIL" && echo "    $result" && _exit 1 || echo OK
-
-_exit 0
-
diff --git a/test/test_openmanoclient.py b/test/test_openmanoclient.py
deleted file mode 100755 (executable)
index 6bdd67c..0000000
+++ /dev/null
@@ -1,505 +0,0 @@
-#!/usr/bin/env python2
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-'''
-Module to test openmanoclient class and indirectly the whole openmano
-It allows both python 2 and python 3
-'''
-__author__="Alfonso Tierno"
-__date__ ="$09-Mar-2016 09:09:48$"
-__version__="0.0.2"
-version_date="May 2016"
-
-import logging
-import imp 
-        
-
-
-def _get_random_name(maxLength):
-    '''generates a string with random craracters from space (ASCCI 32) to ~(ASCCI 126)
-    with a random length up to maxLength
-    '''
-    long_name = "testing up to {} size name: ".format(maxLength) 
-    #long_name += ''.join(chr(random.randint(32,126)) for _ in range(random.randint(20, maxLength-len(long_name))))
-    long_name += ''.join(random.choice('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 ') for _ in range(20, maxLength-len(long_name)))
-    return long_name
-
-
-if __name__=="__main__":
-    import getopt
-    #import os
-    import sys
-    
-
-
-    usage =\
-    """Make a test against an openmano server.\nUsage: test_openmanoclient [options]
-    -v|--verbose: prints more info in the test
-    --version:    shows current version
-    -h|--help:    shows this help
-    -d|--debug:   set logs to debug level
-    -t|--tenant:  set the tenant name to test. By default creates one
-    --datacenter: set the datacenter name to test. By default creates one at http://localhost:9080/openvim
-    -u|--url:     set the openmano server url. By default 'http://localhost:9090/openmano'
-    --image:      use this image path for testing a VNF. By default a fake one is generated, valid for VIM in test mode'
-    """
-
-    #import openmanoclient from relative path
-    module_info = imp.find_module("openmanoclient", [".."] )
-    Client = imp.load_module("Client", *module_info)
-    
-    streamformat = "%(asctime)s %(name)s %(levelname)s: %(message)s"
-    logging.basicConfig(format=streamformat)
-    try:
-        opts, args = getopt.getopt(sys.argv[1:], "t:u:dhv", ["url=", "tenant=", "debug", "help", "version", "verbose", "datacenter=", "image="])
-    except getopt.GetoptError as err:
-        print ("Error: {}\n Try '{} --help' for more information".format(str(err), sys.argv[0]))
-        sys.exit(2)
-
-    debug = False
-    verbose = False
-    url = "http://localhost:9090/openmano"
-    to_delete_list=[]
-    test_tenant = None
-    test_datacenter = None
-    test_vim_tenant = None
-    test_image = None
-    for o, a in opts:
-        if o in ("-v", "--verbose"):
-            verbose = True
-        elif o in ("--version"):
-            print ("{} version".format(sys.argv[0]), __version__, version_date)
-            print ("(c) Copyright Telefonica")
-            sys.exit()
-        elif o in ("-h", "--help"):
-            print(usage)
-            sys.exit()
-        elif o in ("-d", "--debug"):
-            debug = True
-        elif o in ("-u", "--url"):
-            url = a
-        elif o in ("-t", "--tenant"):
-            test_tenant = a 
-        elif o in ("--datacenter"):
-            test_datacenter = a 
-        elif o in ("--image"):
-            test_image = a 
-        else:
-            assert False, "Unhandled option"
-
-    
-    
-    client = Client.openmanoclient(
-                            endpoint_url=url, 
-                            tenant_name=test_tenant,
-                            datacenter_name = test_datacenter,
-                            debug = debug)
-
-    import random
-    test_number=1
-    
-    #TENANTS
-    print("  {}. TEST create_tenant".format(test_number))
-    test_number += 1
-    long_name = _get_random_name(60)
-
-    tenant = client.create_tenant(name=long_name, description=long_name)
-    if verbose: print(tenant)
-
-    print("  {}. TEST list_tenants".format(test_number))
-    test_number += 1
-    tenants = client.list_tenants()
-    if verbose: print(tenants)
-    
-    print("  {}. TEST list_tenans filter by name".format(test_number))
-    test_number += 1
-    tenants_ = client.list_tenants(name=long_name)
-    if not tenants_["tenants"]:
-        raise Exception("Text error, no TENANT found with name")
-    if verbose: print(tenants_)
-    
-    print("  {}. TEST get_tenant by UUID".format(test_number))
-    test_number += 1
-    tenant = client.get_tenant(uuid=tenants_["tenants"][0]["uuid"])
-    if verbose: print(tenant)
-        
-    print("  {}. TEST delete_tenant by name".format(test_number))
-    test_number += 1
-    tenant = client.delete_tenant(name = long_name)
-    if verbose: print(tenant)
-    
-    if not test_tenant:
-        print("  {}. TEST create_tenant for remaining tests".format(test_number))
-        test_number += 1
-        test_tenant = "test-tenant "+\
-        ''.join(random.choice('abcdefghijklmnopqrstuvwxyz') for _ in range(40))
-        tenant = client.create_tenant(name = test_tenant)
-        if verbose: print(tenant)
-        client["tenant_name"] = test_tenant
-        
-        to_delete_list.insert(0,{"item": "tenant", "function": client.delete_tenant, "params":{"name": test_tenant} })
-
-    #DATACENTERS
-    print("  {}. TEST create_datacenter".format(test_number))
-    test_number += 1
-    long_name = _get_random_name(60)
-
-    datacenter = client.create_datacenter(name=long_name, vim_url="http://fakeurl/fake")
-    if verbose: print(datacenter)
-
-    print("  {}. TEST list_datacenters".format(test_number))
-    test_number += 1
-    datacenters = client.list_datacenters(all_tenants=True)
-    if verbose: print(datacenters)
-    
-    print("  {}. TEST list_tenans filter by name".format(test_number))
-    test_number += 1
-    datacenters_ = client.list_datacenters(all_tenants=True, name=long_name)
-    if not datacenters_["datacenters"]:
-        raise Exception("Text error, no TENANT found with name")
-    if verbose: print(datacenters_)
-    
-    print("  {}. TEST get_datacenter by UUID".format(test_number))
-    test_number += 1
-    datacenter = client.get_datacenter(uuid=datacenters_["datacenters"][0]["uuid"], all_tenants=True)
-    if verbose: print(datacenter)
-        
-    print("  {}. TEST delete_datacenter by name".format(test_number))
-    test_number += 1
-    datacenter = client.delete_datacenter(name=long_name)
-    if verbose: print(datacenter)
-    
-    if not test_datacenter:
-        print("  {}. TEST create_datacenter for remaining tests".format(test_number))
-        test_number += 1
-        test_datacenter = "test-datacenter "+\
-        ''.join(random.choice('abcdefghijklmnopqrstuvwxyz') for _ in range(40))
-        datacenter = client.create_datacenter(name=test_datacenter, vim_url="http://127.0.0.1:9080/openvim")
-        if verbose: print(datacenter)
-        client["datacenter_name"] = test_datacenter
-        to_delete_list.insert(0,{"item": "datacenter", "function": client.delete_datacenter,
-                                  "params":{
-                                        "name": test_datacenter
-                                    } 
-                                 })
-
-        print("  {}. TEST datacenter new tenenat".format(test_number))
-        test_number += 1
-        test_vim_tenant = "test-vimtenant "+\
-        ''.join(random.choice('abcdefghijklmnopqrstuvwxyz') for _ in range(40))
-        vim_tenant = client.vim_action("create", "tenants", datacenter_name=test_datacenter, all_tenants=True, name=test_vim_tenant)
-        if verbose: print(vim_tenant)
-        client["datacenter_name"] = test_datacenter
-        to_delete_list.insert(0,{"item": "vim_tenant", 
-                                 "function": client.vim_action,
-                                  "params":{
-                                            "action":"delete",
-                                            "item":"tenants",
-                                            "datacenter_name": test_datacenter,
-                                            "all_tenants": True,
-                                            "uuid": vim_tenant["tenant"]["id"]
-                                            }
-                                 })
-
-        print("  {}. TEST datacenter attach".format(test_number))
-        test_number += 1
-        datacenter = client.attach_datacenter(name=test_datacenter, vim_tenant_name=test_vim_tenant)
-        if verbose: print(datacenter)
-        client["datacenter_name"] = test_datacenter
-        to_delete_list.insert(0,{"item": "datacenter-detach", "function": client.detach_datacenter, "params":{"name": test_datacenter} })
-
-        client["datacenter_name"] = test_datacenter
-
-        # WIMs
-        print("  {}. TEST create_wim".format(test_number))
-        test_number += 1
-        long_name = _get_random_name(60)
-
-        wim = client.create_wim(name=long_name, wim_url="http://fakeurl/fake")
-        if verbose: print(wim)
-
-        print("  {}. TEST list_wims".format(test_number))
-        test_number += 1
-        wims = client.list_wims(all_tenants=True)
-        if verbose: print(wims)
-
-        print("  {}. TEST list_tenans filter by name".format(test_number))
-        test_number += 1
-        wims_ = client.list_wims(all_tenants=True, name=long_name)
-        if not wims_["wims"]:
-            raise Exception("Text error, no TENANT found with name")
-        if verbose: print(wims_)
-
-        print("  {}. TEST get_wim by UUID".format(test_number))
-        test_number += 1
-        wim = client.get_wim(uuid=wims_["wims"][0]["uuid"], all_tenants=True)
-        if verbose: print(wim)
-
-        print("  {}. TEST delete_wim by name".format(test_number))
-        test_number += 1
-        wim = client.delete_wim(name=long_name)
-        if verbose: print(wim)
-
-        print("  {}. TEST create_wim for remaining tests".format(test_number))
-        test_number += 1
-        test_wim = "test-wim " + \
-                          ''.join(random.choice('abcdefghijklmnopqrstuvwxyz') for _ in range(40))
-        wim = client.create_wim(name=test_wim, vim_url="http://127.0.0.1:9080/odl")
-        if verbose: print(wim)
-        to_delete_list.insert(0,
-                              {
-                                    "item": "wim", "function": client.delete_wim,
-                                    "params":
-                                        {
-                                                "name": test_wim
-                                        }
-                                })
-
-        test_wim_tenant = "test-wimtenant " + \
-                           ''.join(random.choice('abcdefghijklmnopqrstuvwxyz') for _ in range(40))
-
-        # print("  {}. TEST datacenter new tenenat".format(test_number))
-        # test_number += 1
-        # test_vim_tenant = "test-vimtenant " + \
-        #                   ''.join(random.choice('abcdefghijklmnopqrstuvwxyz') for _ in range(40))
-        # vim_tenant = client.vim_action("create", "tenants", datacenter_name=test_datacenter, all_tenants=True,
-        #                                name=test_vim_tenant)
-        # if verbose: print(vim_tenant)
-        # client["datacenter_name"] = test_datacenter
-        # to_delete_list.insert(0, {"item": "vim_tenant",
-        #                           "function": client.vim_action,
-        #                           "params": {
-        #                               "action": "delete",
-        #                               "item": "tenants",
-        #                               "datacenter_name": test_datacenter,
-        #                               "all_tenants": True,
-        #                               "uuid": vim_tenant["tenant"]["id"]
-        #                           }
-        #                           })
-
-        print("  {}. TEST wim attach".format(test_number))
-        test_number += 1
-        wim = client.attach_wim(name=test_wim, wim_tenant_name=test_wim_tenant)
-        if verbose: print(wim)
-        to_delete_list.insert(0, {"item": "wim-detach", "function": client.detach_wim,
-                                  "params": {"name": test_wim}})
-    
-    #VIM_ACTIONS
-    print("  {}. TEST create_VIM_tenant".format(test_number))
-    test_number += 1
-    long_name = _get_random_name(60)
-
-    tenant = client.vim_action("create", "tenants", name=long_name)
-    if verbose: print(tenant)
-    tenant_uuid = tenant["tenant"]["id"] 
-
-    print("  {}. TEST list_VIM_tenants".format(test_number))
-    test_number += 1
-    tenants = client.vim_action("list", "tenants")
-    if verbose: print(tenants)
-    
-    print("  {}. TEST get_VIM_tenant by UUID".format(test_number))
-    test_number += 1
-    tenant = client.vim_action("show", "tenants", uuid=tenant_uuid)
-    if verbose: print(tenant)
-        
-    print("  {}. TEST delete_VIM_tenant by id".format(test_number))
-    test_number += 1
-    tenant = client.vim_action("delete", "tenants", uuid = tenant_uuid)
-    if verbose: print(tenant)
-    
-    print("  {}. TEST create_VIM_network".format(test_number))
-    test_number += 1
-    long_name = _get_random_name(60)
-
-    network = client.vim_action("create", "networks", name=long_name)
-    if verbose: print(network)
-    network_uuid = network["network"]["id"] 
-
-    print("  {}. TEST list_VIM_networks".format(test_number))
-    test_number += 1
-    networks = client.vim_action("list", "networks")
-    if verbose: print(networks)
-    
-    print("  {}. TEST get_VIM_network by UUID".format(test_number))
-    test_number += 1
-    network = client.vim_action("show", "networks", uuid=network_uuid)
-    if verbose: print(network)
-        
-    print("  {}. TEST delete_VIM_network by id".format(test_number))
-    test_number += 1
-    network = client.vim_action("delete", "networks", uuid = network_uuid)
-    if verbose: print(network)
-    #VNFS
-    print("  {}. TEST create_vnf".format(test_number))
-    test_number += 1
-    test_vnf_name = _get_random_name(255)
-    if test_image:
-        test_vnf_path = test_image
-    else:
-        test_vnf_path = "/random/path/" + "".join(random.choice('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 ') for _ in range(20))
-    
-    vnf_descriptor={'vnf': {'name': test_vnf_name, 
-                                'VNFC': [{'description': _get_random_name(255),
-                                          'name': 'linux-VM',
-                                          'VNFC image': test_vnf_path,
-                                          'ram': 1024,
-                                          'vcpus': 1,
-                                          'bridge-ifaces': [{'name': 'eth0'}]
-                                        }],
-                                'description': _get_random_name(255),
-                                'nets': [], 
-                                'external-connections': [{'name': 'eth0', 
-                                                          'local_iface_name': 'eth0',
-                                                          'VNFC': 'linux-VM',
-                                                          'type': 'bridge'}], 
-                                'public': False}}
-
-    vnf = client.create_vnf(descriptor=vnf_descriptor)
-    if verbose: print(vnf)
-    to_delete_list.insert(0,{"item": "vnf", "function": client.delete_vnf, "params":{"name": test_vnf_name} })
-
-    print("  {}. TEST list_vnfs".format(test_number))
-    test_number += 1
-    vnfs = client.list_vnfs()
-    if verbose: print(vnfs)
-    
-    print("  {}. TEST list_vnfs filter by name".format(test_number))
-    test_number += 1
-    vnfs_ = client.list_vnfs(name=test_vnf_name)
-    if not vnfs_["vnfs"]:
-        raise Exception("Text error, no VNF found with name")
-    if verbose: print(vnfs_)
-    
-    print("  {}. TEST get_vnf by UUID".format(test_number))
-    test_number += 1
-    vnf = client.get_vnf(uuid=vnfs_["vnfs"][0]["uuid"])
-    if verbose: print(vnf)
-
-    #SCENARIOS
-    print("  {}. TEST create_scenario".format(test_number))
-    test_number += 1
-    test_scenario_name = _get_random_name(255)
-    
-    scenario_descriptor={   'schema_version': 2,
-                            'scenario': {
-                                'name': test_scenario_name, 
-                                'description': _get_random_name(255),
-                                'public': True,
-                                'vnfs':{
-                                    'vnf1': {
-                                        'vnf_name': test_vnf_name
-                                    }
-                                },
-                                'networks':{
-                                    'net1':{
-                                        'external': True,
-                                        'interfaces': [
-                                            {'vnf1': 'eth0'}
-                                        ]
-                                    }
-                                }
-                            }
-                        }
-
-    scenario = client.create_scenario(descriptor=scenario_descriptor)
-    if verbose: print(scenario)
-    to_delete_list.insert(0,{"item": "scenario", "function": client.delete_scenario, "params":{"name": test_scenario_name} })
-
-    print("  {}. TEST list_scenarios".format(test_number))
-    test_number += 1
-    scenarios = client.list_scenarios()
-    if verbose: print(scenarios)
-    
-    print("  {}. TEST list_scenarios filter by name".format(test_number))
-    test_number += 1
-    scenarios_ = client.list_scenarios(name=test_scenario_name)
-    if not scenarios_["scenarios"]:
-        raise Exception("Text error, no VNF found with name")
-    if verbose: print(scenarios_)
-    
-    print("  {}. TEST get_scenario by UUID".format(test_number))
-    test_number += 1
-    scenario = client.get_scenario(uuid=scenarios_["scenarios"][0]["uuid"])
-    if verbose: print(scenario)
-
-
-
-    #INSTANCES
-    print("  {}. TEST create_instance".format(test_number))
-    test_number += 1
-    test_instance_name = _get_random_name(255)
-    
-    instance_descriptor={   'schema_version': 2,
-                            'instance': {
-                                'name': test_instance_name, 
-                                'description': _get_random_name(255),
-                                'public': True,
-                                'vnfs':{
-                                    'vnf1': {
-                                        'vnf_name': test_vnf_name
-                                    }
-                                },
-                                'networks':{
-                                    'net1':{
-                                        'external': True,
-                                        'interfaces': [
-                                            {'vnf1': 'eth0'}
-                                        ]
-                                    }
-                                }
-                            }
-                        }
-
-    instance = client.create_instance(scenario_name=test_scenario_name, name=test_instance_name )
-    if verbose: print(instance)
-    to_delete_list.insert(0,{"item": "instance", "function": client.delete_instance, "params":{"name": test_instance_name} })
-
-    print("  {}. TEST list_instances".format(test_number))
-    test_number += 1
-    instances = client.list_instances()
-    if verbose: print(instances)
-    
-    print("  {}. TEST list_instances filter by name".format(test_number))
-    test_number += 1
-    instances_ = client.list_instances(name=test_instance_name)
-    if not instances_["instances"]:
-        raise Exception("Text error, no VNF found with name")
-    if verbose: print(instances_)
-    
-    print("  {}. TEST get_instance by UUID".format(test_number))
-    test_number += 1
-    instance = client.get_instance(uuid=instances_["instances"][0]["uuid"])
-    if verbose: print(instance)
-
-
-
-
-    #DELETE Create things
-    for item in to_delete_list:
-        print("  {}. TEST delete_{}".format(test_number, item["item"]))
-        test_number += 1
-        response = item["function"](**item["params"]) 
-        if verbose: print(response)
-    
diff --git a/test/test_osconnector.py b/test/test_osconnector.py
deleted file mode 100755 (executable)
index 92d5437..0000000
+++ /dev/null
@@ -1,274 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-'''
-test_osconnector.py makes a test over osconnector.py (openstack connector)
-credentiasl must be provided with environment bash variables or arguments
-'''
-__author__="Alfonso Tierno, Gerardo Garcia"
-__date__ ="$22-jun-2014 11:19:29$"
-
-
-import os
-import sys
-import getopt
-#import yaml
-#from jsonschema import validate as js_v, exceptions as js_e
-
-#load osconnector, insert openmano directory in the path
-r=sys.argv[0].rfind('/')
-if r<0:
-    osconnector_path=".."
-else:
-    osconnector_path=sys.argv[0][:r+1]+".."
-sys.path.insert(0, osconnector_path)
-#sys.path.insert(0, '/home/atierno/workspace/openmano/openmano')
-import osconnector
-
-version="0.1"
-
-def usage():
-    print "Usage: ", sys.argv[0], "[options]"
-    print "  -v|--version            openstack version (by default 2)"
-    print "  -u|--username USER      user to authenticate (by default bash:OS_USERNAME)"
-    print "  -p|--password PASSWD    password to authenticate (by default bash:OS_PASSWORD)"
-    print "  -U|--auth_url URL       url of authentication over keystone (by default bash:OS_AUTH_URL)"
-    print "  -t|--tenant_name TENANT password to authenticate (by default bash:OS_TENANT_NAME)"
-    print "  -i|--image IMAGE        use this local path or url for loading image (by default cirros)"
-    print "  --skip-admin-tests      skip tests that requires administrative permissions, like create tenants"
-    print "  -h|--help               shows this help"
-    return
-
-def delete_items():
-    global myvim
-    global rollback_list
-    print "Making rollback, deleting items"
-    for i in range(len(rollback_list)-1, -1, -1):
-        item,name,id_ = rollback_list[i]
-        if item=="creds":
-            print ("changing credentials %s='%s'" % (name, id_)).ljust(50),
-        else:
-            print ("deleting %s '%s'" % (item, name)).ljust(50),
-        sys.stdout.flush()
-        if item=="flavor":
-            result,message=myvim.delete_tenant_flavor(id_)
-        elif item=="image":
-            result,message=myvim.delete_tenant_image(id_)
-        elif item=="tenant":
-            result,message=myvim.delete_tenant(id_)
-        elif item=="user":
-            result,message=myvim.delete_user(id_)
-        elif item=="network":
-            result,message=myvim.delete_tenant_network(id_)
-        elif item=="vm":
-            result,message=myvim.delete_tenant_vminstance(id_)
-        elif item=="creds":
-            try:
-                myvim[name]=id_
-                result=1
-            except Exception as e:
-                result=-1
-                message= "  " + str(type(e))[6:-1] + ": "+  str(e)
-        else:
-            print "Internal error unknown item rollback %s,%s,%s" % (item,name,id_)
-            continue
-        if result<0:
-            print " Fail"
-            print "  VIM response:", message
-            continue
-        else:
-            print " Ok"
-
-if __name__=="__main__":
-    global myvim
-    global rollback_list
-    #print "(c) Copyright Telefonica"
-    rollback_list=[]
-    try:
-        opts, args = getopt.getopt(sys.argv[1:], "hv:u:U:p:t:i:",
-                 ["username=", "help", "version=", "password=", "tenant=", "url=","skip-admin-tests",'image='])
-    except getopt.GetoptError as err:
-        # print help information and exit:
-        print "Error:", err # will print something like "option -a not recognized"
-        usage()
-        sys.exit(2)
-        
-    creds = {}
-    creds['version'] = os.environ.get('OS_VERSION', '2')
-    creds['username'] = os.environ.get('OS_USERNAME')
-    creds['password'] = os.environ.get('OS_PASSWORD')
-    creds['auth_url'] = os.environ.get('OS_AUTH_URL')
-    creds['tenant_name'] = os.environ.get('OS_TENANT_NAME')
-    skip_admin_tests=False
-    image_path="http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img"
-    for o, a in opts:
-        if o in ("-h", "--help"):
-            usage()
-            sys.exit()
-        elif o in ("-v", "--version"):
-            creds['version']=a
-        elif o in ("-u", "--username"):
-            creds['username']=a
-        elif o in ("-p", "--password"):
-            creds['password']=a
-        elif o in ("-U", "--auth_url"):
-            creds['auth_url']=a
-        elif o in ("-t", "--tenant_name"):
-            creds['tenant_name']=a
-        elif o in ("-i", "--image"):
-            image_path=a
-        elif o=="--skip-admin-tests":
-            skip_admin_tests=True
-        else:
-            assert False, "Unhandled option"
-    if creds['auth_url']==None:
-        print "you must provide openstack url with -U or bash OS_AUTH_URL"
-        sys.exit()
-    print "creds:", creds
-    
-
-    try:
-        print 'load osconnector class'.ljust(50),
-        sys.stdout.flush()
-        try:
-            myvim=osconnector.osconnector(uuid=None, name='test-openstack', tenant=creds['tenant_name'], 
-                url=creds['auth_url'], url_admin=None,
-                user=creds['username'], passwd=creds['password'],
-                debug = False, config={'network_vlan_ranges':'physnet_sriov'} )
-            print " Ok"
-        except Exception as e:
-            print " Fail"
-            print str(type(e))[6:-1] + ": "+  str(e) 
-            exit(-1)
-        
-        if not skip_admin_tests:
-            tenant_name="tos-tenant"
-            print ("creating new tenant '%s'" % tenant_name).ljust(50),
-            sys.stdout.flush()
-            result,new_tenant=myvim.new_tenant(tenant_name, "test tenant_description, trying a long description to get the limit. 2 trying a long description to get the limit. 3. trying a long description to get the limit.")
-            if result<0:
-                print " Fail"
-                print "  you can skip tenant creation with param'--skip-admin-tests'"
-                print "  VIM response:", new_tenant
-                exit(-1)
-            else:
-                print " Ok", new_tenant
-                rollback_list.append(("tenant",tenant_name,new_tenant))
-
-            user_name="tos-user"
-            print ("creating new user '%s'" % user_name).ljust(50),
-            sys.stdout.flush()
-            result,new_user=myvim.new_user(user_name, user_name, tenant_id=new_tenant)
-            if result<0:
-                print " Fail"
-                print "  VIM response:", new_user
-                exit(-1)
-            else:
-                print " Ok", new_user
-                rollback_list.append(("user",user_name,new_user))
-                    
-        name="tos-fl1"
-        print ("creating new flavor '%s'"%name).ljust(50),
-        sys.stdout.flush()
-        flavor={}
-        flavor['name']=name
-        result,new_flavor1=myvim.new_tenant_flavor(flavor, True)
-        if result<0:
-            print " Fail"
-            print "  VIM response:", new_flavor1
-            exit(-1)
-        else:
-            print " Ok", new_flavor1
-            rollback_list.append(("flavor",name,new_flavor1))
-            
-        name="tos-cirros"
-        print ("creating new image '%s'"%name).ljust(50),
-        sys.stdout.flush()
-        image={}
-        image['name']=name
-        image['location']=image_path #"/home/atierno/cirros-0.3.3-x86_64-disk.img"
-        result,new_image1=myvim.new_tenant_image(image)
-        if result<0:
-            print " Fail"
-            print "  VIM response:", new_image1
-            exit(-1)
-        else:
-            print " Ok", new_image1
-            rollback_list.append(("image",name, new_image1))
-
-        if not skip_admin_tests:
-            try:
-                print 'changing credentials to new tenant'.ljust(50),
-                sys.stdout.flush()
-                myvim['tenant']  =tenant_name
-                myvim['user']=user_name
-                myvim['passwd']=user_name
-                print " Ok"
-                rollback_list.append(("creds", "tenant", creds["tenant_name"]))
-                rollback_list.append(("creds", "user",   creds["username"]))
-                rollback_list.append(("creds", "passwd", creds["password"]))
-            except Exception as e:
-                print " Fail"
-                print " Error setting osconnector to new tenant:", str(type(e))[6:-1] + ": "+  str(e)
-                exit(-1)
-
-        name="tos-net-bridge"
-        print ("creating new net '%s'"%name).ljust(50),
-        sys.stdout.flush()
-        result,new_net1=myvim.new_tenant_network(name, "bridge")
-        if result<0:
-            print " Fail"
-            print "  VIM response:", new_net1
-            exit(-1)
-        else:
-            print " Ok", new_net1
-            rollback_list.append(("network",name, new_net1))
-
-        name="tos-vm-cloud"
-        print ("creating new VM '%s'"%name).ljust(50),
-        sys.stdout.flush()
-        result,new_vm1=myvim.new_tenant_vminstance(name, "vm-cloud-description", False,new_image1,new_flavor1,
-                                    [{"net_id":new_net1, "type":"virtio"}] )
-        if result<0:
-            print " Fail"
-            print "  VIM response:", new_vm1
-            exit(-1)
-        else:
-            print " Ok", new_vm1
-            rollback_list.append(("vm",name, new_vm1))
-
-            
-        print 'DONE  Ok'
-        print "Type ENTER to delete items"
-        raw_input('> ')  
-        exit()      
-              
-    except KeyboardInterrupt:
-        print " Canceled!"
-    except SystemExit:
-        pass
-    if len(rollback_list):
-        delete_items()
-
diff --git a/test/test_vimconn.sh b/test/test_vimconn.sh
deleted file mode 100755 (executable)
index 0f84af3..0000000
+++ /dev/null
@@ -1,294 +0,0 @@
-#!/bin/bash
-
-##
-# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-#This script can be used as a basic test of openmano deployment over a vim
-#in order to use you need to set the VIM_XXXX bash variables with a vim values
-#    VIM_TYPE         openstack or openvim
-#    VIM_USERNAME     e.g.: admin
-#    VIM_PASSWORD     
-#    VIM_AUTH_URL     url to access VIM e.g. http:/openstack:35357/v2.0
-#    VIM_AUTH_URL_ADMIN admin url
-#    VIM_TENANT_NAME  e.g.: admin
-#    VIM_CONFIG       e.g.: "'network_vlan_ranges: sriov_net'"
-#    VIM_TEST_IMAGE_PATH_LINUX  image path(location) to use by the VNF linux
-#    VIM_TEST_IMAGE_PATH_NFV image path(location) to use by the VNF dataplaneVNF_2VMs and dataplaneVNF3
-
-#it should be used with source. It can modifies /home/$USER/.bashrc appending the variables
-#you need to delete them manually if desired
-
-function usage(){
-    echo -e "usage: ${BASH_SOURCE[0]} [OPTIONS] <action>\n  test VIM managing from openmano"
-    echo -e "  <action> is a list of the following items (by default 'reset create')"
-    echo -e "    reset     reset the openmano database content"
-    echo -e "    create    creates items at VIM"
-    echo -e "    delete    delete created items"
-    echo -e "  OPTIONS:"
-    echo -e "    -f --force       does not prompt for confirmation"
-    echo -e "    -h --help        shows this help"
-    echo -e "    --insert-bashrc  insert the created tenant,datacenter variables at"
-    echo -e "                     ~/.bashrc to be available by openmano config"
-}
-
-function is_valid_uuid(){
-    echo "$1" | grep -q -E '^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$' && return 0
-    echo "$1" | grep -q -E '^[0-9a-f]{32}$' && return 0
-    return 1
-}
-
-#detect if is called with a source to use the 'exit'/'return' command for exiting
-[[ ${BASH_SOURCE[0]} != $0 ]] && _exit="return" || _exit="exit"
-
-#detect if environment variables are set
-fail=""
-[[ -z $VIM_TYPE ]]     && echo "VIM_TYPE variable not defined" >&2 && fail=1
-[[ -z $VIM_USERNAME ]] && echo "VIM_USERNAME variable not defined" >&2 && fail=1
-[[ -z $VIM_PASSWORD ]] && echo "VIM_PASSWORD variable not defined" >&2 && fail=1
-[[ -z $VIM_AUTH_URL ]] && echo "VIM_AUTH_URL variable not defined" >&2 && fail=1
-[[ -z $VIM_TENANT_NAME ]] && [[ -z $VIM_TENANT_NAME ]] && echo "neither VIM_TENANT_NAME not VIM_TENANT_ID variables are not defined" >&2 && fail=1
-[[ -z $VIM_CONFIG ]] && echo "VIM_CONFIG variable not defined" >&2 && fail=1
-[[ -z $VIM_TEST_IMAGE_PATH_LINUX ]] && echo "VIM_TEST_IMAGE_PATH_LINUX variable not defined" >&2 && fail=1
-[[ -z $VIM_TEST_IMAGE_PATH_NFV ]]   && echo "VIM_TEST_IMAGE_PATH_NFV variable not defined" >&2 && fail=1
-[[ -n $fail ]] && $_exit 1
-
-#check correct arguments
-action_list=""
-for param in $*
-do
-   if [[ $param == reset ]] || [[ $param == create ]] || [[ $param == delete ]]
-   then 
-       action_list="$action_list $param"
-   elif [[ $param == -h ]] || [[ $param == --help ]]
-   then
-       usage
-       $_exit 0
-   elif [[ $param == -f ]] || [[ $param == --force ]]
-   then
-       force=y
-   elif [[ $param == --insert-bashrc ]]
-   then
-       insert_bashrc=y
-   else
-       echo "invalid argument '$param'?" &&  usage >&2 && $_exit 1
-   fi
-done
-
-DIRNAME=$(dirname $(readlink -f ${BASH_SOURCE[0]}))
-DIRmano=$(dirname $DIRNAME)
-DIRscript=${DIRmano}/scripts
-#by default action should be reset and create
-[[ -z $action_list ]] && action_list="reset create"
-
-for action in $action_list
-do
-if [[ $action == "reset" ]] 
-then 
-
-    #ask for confirmation if argument is not -f --force
-    [[ $force != y ]] && read -e -p "WARNING: reset openmano database, content will be lost!!! Continue(y/N)" force
-    [[ $force != y ]] && [[ $force != yes ]] && echo "aborted!" && $_exit
-
-    echo "Stopping openmano"
-    $DIRscript/service-openmano mano stop
-    echo "Initializing openmano database"
-    $DIRmano/database_utils/init_mano_db.sh -u mano -p manopw --createdb
-    echo "Starting openmano"
-    $DIRscript/service-openmano mano start
-
-elif [[ $action == "delete" ]]
-then
-    result=`openmano tenant-list TESTVIM-tenant`
-    nfvotenant=`echo $result |gawk '{print $1}'`
-    #check a valid uuid is obtained
-    is_valid_uuid $nfvotenant || ! echo "Tenant TESTVIM-tenant not found. Already delete?" >&2 || $_exit 1
-    export OPENMANO_TENANT=$nfvotenant
-    openmano instance-scenario-delete -f simple-instance     || echo "fail"
-    openmano instance-scenario-delete -f complex2-instance   || echo "fail"
-    openmano instance-scenario-delete -f complex4-instance   || echo "fail"
-    openmano scenario-delete -f simple       || echo "fail"
-    openmano scenario-delete -f complex2     || echo "fail"
-    openmano scenario-delete -f complex3     || echo "fail"
-    openmano scenario-delete -f complex4     || echo "fail"
-    openmano vnf-delete -f linux             || echo "fail"
-    openmano vnf-delete -f linux_2VMs_v02    || echo "fail"
-    openmano vnf-delete -f dataplaneVNF_2VMs || echo "fail"
-    openmano vnf-delete -f dataplaneVNF3     || echo "fail"
-    openmano vnf-delete -f TESTVIM-VNF1          || echo "fail"
-    openmano datacenter-detach TESTVIM-dc        || echo "fail"
-    openmano datacenter-delete -f TESTVIM-dc     || echo "fail"
-    openmano tenant-delete -f TESTVIM-tenant     || echo "fail"
-
-elif [[ $action == "create" ]]
-then 
-
-    printf "%-50s" "Creating openmano tenant 'TESTVIM-tenant': "
-    result=`openmano tenant-create TESTVIM-tenant --description="created by test_vimconn.sh"`
-    nfvotenant=`echo $result |gawk '{print $1}'`
-    #check a valid uuid is obtained
-    ! is_valid_uuid $nfvotenant && echo "FAIL" && echo "    $result" && $_exit 1 
-    export OPENMANO_TENANT=$nfvotenant
-    [[ $insert_bashrc == y ]] && echo -e "\nexport OPENMANO_TENANT=$nfvotenant"  >> ~/.bashrc
-    echo $nfvotenant
-
-    printf "%-50s" "Creating datacenter 'TESTVIM-dc' in openmano:"
-    URL_ADMIN_PARAM=""
-    [[ -n $VIM_AUTH_URL_ADMIN ]] && URL_ADMIN_PARAM="--url_admin=$VIM_AUTH_URL_ADMIN"
-    result=`openmano datacenter-create TESTVIM-dc "${VIM_AUTH_URL}" "--type=$VIM_TYPE" $URL_ADMIN_PARAM "--config=${VIM_CONFIG}"`
-    datacenter=`echo $result |gawk '{print $1}'`
-    #check a valid uuid is obtained
-    ! is_valid_uuid $datacenter && echo "FAIL" && echo "    $result" && $_exit 1 
-    echo $datacenter
-    export OPENMANO_DATACENTER=$datacenter
-    [[ $insert_bashrc == y ]] && echo -e "\nexport OPENMANO_DATACENTER=$datacenter"  >> ~/.bashrc
-
-    printf "%-50s" "Attaching openmano tenant to the datacenter:"
-    [[ -n $VIM_PASSWORD ]]    && passwd_param="--password=$VIM_PASSWORD"                    || passwd_param=""
-    [[ -n $VIM_TENANT_NAME ]] && vim_tenant_name_param="--vim-tenant-name=$VIM_TENANT_NAME" || vim_tenant_name_param=""
-    [[ -n $VIM_TENANT_ID   ]] && vim_tenant_id_param="--vim-tenant-id=$VIM_TENANT_ID"       || vim_tenant_id_param=""
-    [[ -n $VIM_PASSWORD ]] && passwd_param="--password=$VIM_PASSWORD" || passwd_param=""
-    result=`openmano datacenter-attach TESTVIM-dc "--user=$VIM_USERNAME" "$passwd_param" "$vim_tenant_name_param"`
-    [[ $? != 0 ]] && echo  "FAIL" && echo "    $result" && $_exit 1
-    echo OK
-
-    printf "%-50s" "Updating external nets in openmano: "
-    result=`openmano datacenter-netmap-delete -f --all`
-    [[ $? != 0 ]] && echo  "FAIL" && echo "    $result"  && $_exit 1
-    result=`openmano datacenter-netmap-import -f`
-    [[ $? != 0 ]] && echo  "FAIL" && echo "    $result"  && $_exit 1
-    echo OK
-
-    printf "%-50s" "Creating VNF 'linux': "
-    #glance image-create --file=./US1404dpdk.qcow2 --name=US1404dpdk --disk-format=qcow2 --min-disk=2 --is-public=True --container-format=bare
-    #nova image-meta US1404dpdk set location=/mnt/powervault/virtualization/vnfs/os/US1404dpdk.qcow2
-    #glance image-create --file=./US1404user.qcow2 --min-disk=2 --is-public=True --container-format=bare --name=US1404user --disk-format=qcow2
-    #nova image-meta US1404user  set location=/mnt/powervault/virtualization/vnfs/os/US1404user.qcow2
-    result=`openmano vnf-create $DIRmano/vnfs/examples/linux.yaml "--image-path=$VIM_TEST_IMAGE_PATH_LINUX"`
-    vnf=`echo $result |gawk '{print $1}'`
-    #check a valid uuid is obtained
-    ! is_valid_uuid $vnf && echo FAIL && echo "    $result" &&  $_exit 1
-    echo $vnf
-    
-    printf "%-50s" "Creating VNF 1PF,1VF,2GB,4PThreads: "
-    result=`openmano vnf-create "vnf:
-        name: TESTVIM-VNF1
-        external-connections:
-        - name: eth0
-          type: mgmt
-          VNFC: TESTVIM-VNF1-VM
-          local_iface_name: eth0
-        - name: PF0
-          type: data
-          VNFC: TESTVIM-VNF1-VM
-          local_iface_name: PF0
-        - name: VF0
-          type: data
-          VNFC: TESTVIM-VNF1-VM
-          local_iface_name: VF0
-        VNFC: 
-        - name: TESTVIM-VNF1-VM
-          VNFC image: $VIM_TEST_IMAGE_PATH_NFV
-          numas:
-          - paired-threads: 2
-            paired-threads-id: [ [0,2], [1,3] ]
-            memory: 2
-            interfaces:
-            - name:  PF0
-              vpci: '0000:00:11.0'
-              dedicated: 'yes'
-              bandwidth: 10 Gbps
-              mac_address: '20:33:45:56:77:44'
-            - name:  VF0
-              vpci:  '0000:00:12.0'
-              dedicated: 'no'
-              bandwidth: 1 Gbps
-              mac_address: '20:33:45:56:77:45'
-          bridge-ifaces:
-          - name: eth0
-            vpci: '0000:00:09.0'
-            bandwidth: 1 Mbps
-            mac_address: '20:33:45:56:77:46'
-            model: e1000
-       "`
-    vnf=`echo $result |gawk '{print $1}'`
-    ! is_valid_uuid $vnf && echo FAIL && echo "    $result" && $_exit 1
-    echo $vnf
-    printf "%-50s" "Creating VNF 'dataplaneVNF_2VMs': "
-    result=`openmano vnf-create $DIRmano/vnfs/examples/dataplaneVNF_2VMs.yaml "--image-path=$VIM_TEST_IMAGE_PATH_NFV,$VIM_TEST_IMAGE_PATH_NFV"`
-    vnf=`echo $result |gawk '{print $1}'`
-    ! is_valid_uuid $vnf && echo FAIL && echo "    $result" && $_exit 1
-    echo $vnf
-    printf "%-50s" "Creating VNF 'dataplaneVNF3.yaml': "
-    result=`openmano vnf-create $DIRmano/vnfs/examples/dataplaneVNF3.yaml "--image-path=$VIM_TEST_IMAGE_PATH_NFV"`
-    vnf=`echo $result |gawk '{print $1}'`
-    ! is_valid_uuid $vnf && echo FAIL && echo "    $result" && $_exit 1
-    echo $vnf
-
-    printf "%-50s" "Creating VNF 'dataplaneVNF_2VMs_v02': "
-    result=`openmano vnf-create $DIRmano/vnfs/examples/dataplaneVNF_2VMs_v02.yaml "--image-path=$VIM_TEST_IMAGE_PATH_NFV,$VIM_TEST_IMAGE_PATH_NFV"`
-    vnf=`echo $result |gawk '{print $1}'`
-    ! is_valid_uuid $vnf && echo FAIL && echo "    $result" && $_exit 1
-    echo $vnf
-
-    printf "%-50s" "Creating VNF 'linux_2VMs_v02': "
-    result=`openmano vnf-create $DIRmano/vnfs/examples/linux_2VMs_v02.yaml "--image-path=$VIM_TEST_IMAGE_PATH_NFV,$VIM_TEST_IMAGE_PATH_NFV"`
-    vnf=`echo $result |gawk '{print $1}'`
-    ! is_valid_uuid $vnf && echo FAIL && echo "    $result" && $_exit 1
-    echo $vnf
-
-    for sce in simple complex2 complex3 complex4
-    do
-      printf "%-50s" "Creating scenario '$sce':"
-      result=`openmano scenario-create $DIRmano/scenarios/examples/${sce}.yaml`
-      scenario=`echo $result |gawk '{print $1}'`
-      ! is_valid_uuid $scenario && echo FAIL && echo "    $result" &&  $_exit 1
-      echo $scenario
-    done
-    
-    #USER_KEY=""
-    key_param1=""
-    key_param2=""
-    #add user keys if present at .ssh    
-    ls ${HOME}/.ssh/*.pub > /dev/null 2>&1 && key_param1=--keypair-auto
-
-    for sce in simple complex2
-    do 
-      printf "%-50s" "Deploying scenario '$sce':"
-      result=`openmano instance-scenario-create --scenario $sce --name ${sce}-instance "$key_param1" "$key_param2"`
-      instance=`echo $result |gawk '{print $1}'`
-      ! is_valid_uuid $instance && echo FAIL && echo "    $result" && $_exit 1
-      echo $instance
-    done
-
-    #Testing IP parameters in networks
-    printf "%-50s" "Deploying scenario 'complex4' with IP parameters in networks:"
-    result=`openmano instance-scenario-create $DIRmano/instance-scenarios/examples/instance-creation-complex4.yaml "$key_param1" "$key_param2"`
-    instance=`echo $result |gawk '{print $1}'`
-    ! is_valid_uuid $instance && echo FAIL && echo "    $result" && $_exit 1
-    echo $instance
-
-    echo
-    echo DONE
-fi
-done
-
diff --git a/tox.ini b/tox.ini
deleted file mode 100644 (file)
index e451bb8..0000000
--- a/tox.ini
+++ /dev/null
@@ -1,23 +0,0 @@
-[tox]
-#envlist = py27,py3
-envlist = py27
-toxworkdir={homedir}/.tox
-
-[testenv]
-deps=nose
-     mock
-commands=nosetests
-
-[testenv:flake8]
-basepython = python
-deps = flake8
-# TODO for the moment few files are tested.
-commands = flake8 osm_ro/wim  --max-line-length 120 \
-    --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp,osm_im --ignore W291,W293,E226,E402,W504
-
-[testenv:build]
-basepython = python
-deps = stdeb
-       setuptools-version-command
-commands = python setup.py --command-packages=stdeb.command bdist_deb
-